/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

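/* Map the Ethernet link speed reported by ethtool (in Mb/s) onto the
 * closest IB active_speed/active_width pair advertised in the port
 * attributes.
 */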
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

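/* Copy one receive work request into the next free slot of a receive
 * queue (QP RQ or SRQ ring buffer) and advance the producer index.
 */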
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

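/* Translate the opcode-specific fields of an ib_send_wr into the
 * driver's internal rxe_send_wr representation.
 */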
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - the rdma fields below apply too */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds an irq
	 * lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

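/* Page-list callback passed to ib_sg_to_pages() by rxe_map_mr_sg();
 * records one page address in the MR's buffer map.
 */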
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe_node_guid(rxe);
	dev->dev.dma_ops = &dma_virt_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}