/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

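/* Report the device attributes cached in rxe->attr at driver init time.
 * rxe defines no vendor-specific query payload, so any udata in or out
 * length is rejected.
 */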
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

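/* rxe exposes exactly one port (port 1). Port attributes are cached in
 * rxe->port; the active speed and width are taken from the underlying
 * Ethernet device via ib_get_eth_speed().
 */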
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

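/* The GID table itself is maintained by the IB core; these hooks only
 * bound-check the index against the rxe port table size.
 */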
static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

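/* Look up a P_Key in the per-port table; valid indices are bounded by
 * the pkey_tbl_len advertised in the port attributes.
 */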
static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

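/* Resolve the source GID named by the AH attribute through the GID
 * cache, then fill the rxe address vector with the port and GID info
 * and the IP addressing derived from it.
 */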
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return 0;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)

{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

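/* Copy one ib_recv_wr into the next free slot of a receive queue ring.
 * The scatter list is checked against max_sge and the total length is
 * accumulated so the responder can track remaining space in dma.resid.
 */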
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

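/* Create a shared receive queue. A userspace caller must leave room for
 * the response ABI (struct rxe_create_srq_resp), so a provider library
 * built against a different ABI fails early with -EINVAL.
 */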
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

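/* Create a queue pair. A userspace caller must supply room for the
 * response ABI (struct rxe_create_qp_resp) and must not pass an input
 * payload; kernel callers take the !udata path and get a kernel QP.
 */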
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

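/* Sanity-check a send work request before queueing it: the scatter list
 * must fit the SQ, atomics need an 8-byte, naturally aligned target, and
 * inline payloads are bounded by the max_inline set at QP creation.
 */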
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

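/* Build a complete send WQE in the ring slot: translate the work
 * request, copy either the inline payload or the scatter list into the
 * WQE, then stamp the DMA bookkeeping and send sequence number.
 */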
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

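/* Validate and enqueue a single send WR under the SQ lock. As in
 * post_one_recv(), the smp_wmb() orders the WQE stores against the
 * producer-pointer update that publishes the WQE to the requester.
 */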
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an
	 * irq lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

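/* Create a completion queue. attr->flags is unsupported and rejected;
 * userspace callers must leave room for struct rxe_create_cq_resp.
 */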
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

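/* Arm the CQ for the next solicited or unsolicited completion. With
 * IB_CQ_REPORT_MISSED_EVENTS the return value is 1 if completions were
 * already queued when the CQ was armed, telling the caller to re-poll.
 */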
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

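/* Per-page callback for ib_sg_to_pages(): record one page address in the
 * two-level map table of a fast-registration MR. rxe_map_mr_sg() below
 * resets nbuf and then derives the iova/length/page geometry from ibmr.
 */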
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

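/* Fill in the ib_device: identity and DMA setup (rxe uses dma_virt_ops
 * since there is no real HCA), the uverbs command mask, the verbs op
 * table, and the crc32 shash used to compute ICRC, then register with
 * the IB core and create the "parent" sysfs attribute.
 */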
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}