/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

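/* Verbs entry points for the rxe soft-RoCE driver. These callbacks are
 * wired into the ib_device ops table in rxe_register_device() below.
 */
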
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

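/* Map an Ethernet link speed (in Mb/s, as reported by ethtool) to the
 * closest InfiniBand active_speed/active_width pair.
 */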
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

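	/* Prefer the newer get_link_ksettings ethtool op and fall back to
	 * the legacy get_settings op; if the netdev provides neither,
	 * assume a 1G link.
	 */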
	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	/* use >= so index == RXE_PORT_GID_TBL_LEN is rejected, matching
	 * the bounds checks in rxe_add_gid()/rxe_del_gid() below
	 */
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

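/* The GID table entries themselves are maintained by the IB core GID
 * cache; rxe only needs to bounds-check the index when entries are
 * added or removed.
 */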
static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

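/* Resolve the source GID for an address handle and fill in the IP
 * addressing info used to route its packets on the wire.
 */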
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

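/* Copy one ib_recv_wr into the receive queue ring. The caller must hold
 * the queue's producer lock.
 */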
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			/* drop the reference taken by rxe_alloc() instead
			 * of leaking the qp as the original error path did
			 */
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

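/* Sanity-check a send WR against the queue limits: SGE count, inline
 * payload size, and, for atomics, at least 8 bytes at an 8-byte-aligned
 * remote address.
 */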
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

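/* Translate the ib_send_wr fields that the requester needs into rxe's
 * internal rxe_send_wr representation.
 */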
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
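			/* fall through - WRITE_WITH_IMM also needs the
			 * rdma remote_addr and rkey set below
			 */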
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;
			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		atomic_wr(ibwr)->remote_addr :
		rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

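/* Validate one send WR and copy it into the send queue ring under the
 * send queue's producer lock.
 */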
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds irq lock,
	 * and the requester calls ip_local_out_sk() which takes spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else {
		return rxe_post_send_kernel(qp, wr, bad_wr);
	}
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	return 0;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

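/* Callback for ib_sg_to_pages(): record one page-sized buffer in the
 * memory region's map list.
 */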
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	return snprintf(buf, 16, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

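/* Register the rxe device with the IB core: fill in the device
 * properties, the mask of supported userspace verbs commands, and the
 * verbs ops table, then create the sysfs attributes.
 */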
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}