/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include <rdma/cxgb3-abi.h>
#include "common.h"

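/*
 * T3 is an iWARP device, so address handles, multicast and MADs are
 * not supported; the verbs below are stubs that fail with -ENOSYS so
 * the core can still register a complete device.
 */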
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct rdma_ah_attr *ah_attr,
				    struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	pr_debug("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

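/*
 * CQ allocation: the HW CQ is sized to a power of two, inserted into
 * the cqid idr, and, for user contexts, exported through the mmap key
 * mechanism below.  Older userspace (downlevel libcxgb3) used a smaller
 * response struct, so the reply is truncated to
 * iwch_create_cq_resp_v0 when the provided output buffer is short.
 */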
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	pr_debug("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr))
		return -ENOMEM;

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		pr_err("%s - cxio_destroy_cq failed %d\n", __func__, ret);

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

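/*
 * Arm the CQ for event notification.  For user CQs the current read
 * pointer lives in user memory and is fetched with get_user() before
 * the doorbell is rung; CQ_ARM_SE requests solicited-only events.
 */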
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

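/*
 * mmap keys handed out in uresp.key/db_key come back here as the file
 * offset; remove_mmap() translates (key, len) into the physical
 * address recorded at create time.  Doorbell pages are mapped
 * write-only and uncached, WQ/CQ queue memory as normal contiguous
 * DMA memory.
 */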
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	pr_debug("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

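/*
 * DMA MR: T3 can only express a 32-bit region, so the whole 4GB space
 * is covered with 64 page-list entries of 1 << 26 (64MB) each, written
 * into a freshly allocated PBL before the stag is registered.
 */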
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once("Cannot support dma_mrs on this platform\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
		 __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

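/*
 * User MR: pin the user buffer with ib_umem_get(), then stream the DMA
 * addresses of its pages into the PBL one PAGE_SIZE worth of entries
 * at a time via iwch_write_pbl().
 */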
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = mhp->umem->page_shift;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 (k << shift));
			if (i == PAGE_SIZE / sizeof(*pages)) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
			 uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			/*
			 * iwch_dereg_mr() releases the umem and frees
			 * mhp, so don't fall through to the error path
			 * below, which would free both again.
			 */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
				   struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

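/*
 * Fast-register MR support: iwch_alloc_mr() reserves a PBL and stag up
 * front; at post time the core calls iwch_map_mr_sg(), which walks the
 * scatterlist and collects page addresses through the iwch_set_page()
 * callback.
 */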
static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages) {
		ret = -ENOMEM;
		goto pl_err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}

static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
		 ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

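/*
 * QP creation: the RQ must be a power of two strictly larger than the
 * requested depth (the RQT needs depth + 1 entries) and at least 16;
 * kernel QPs get extra WQ space because fastreg WRs can consume two WR
 * slots.  User QPs export the WQ and doorbell through two mmap keys.
 */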
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	pr_debug("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr + 1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
			roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
		 wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time. The CM sets them via a QP modify.
	 * Need to fix... I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
		 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
		 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
		 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
		 __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

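/*
 * The ethtool fw_version string is assumed to be a type character
 * followed by "major.minor.micro"; skip the leading character and pack
 * the three numbers into the 64-bit fw_ver device attribute.
 */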
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

enum counters {
	IPINRECEIVES,
	IPINHDRERRORS,
	IPINADDRERRORS,
	IPINUNKNOWNPROTOS,
	IPINDISCARDS,
	IPINDELIVERS,
	IPOUTREQUESTS,
	IPOUTDISCARDS,
	IPOUTNOROUTES,
	IPREASMTIMEOUT,
	IPREASMREQDS,
	IPREASMOKS,
	IPREASMFAILS,
	TCPACTIVEOPENS,
	TCPPASSIVEOPENS,
	TCPATTEMPTFAILS,
	TCPESTABRESETS,
	TCPCURRESTAB,
	TCPINSEGS,
	TCPOUTSEGS,
	TCPRETRANSSEGS,
	TCPINERRS,
	TCPOUTRSTS,
	TCPRTOMIN,
	TCPRTOMAX,
	NR_COUNTERS
};

static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};

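/*
 * HW statistics: only device-level (port 0) counters exist.  The
 * values come from the T3 TP MIB, fetched through the RDMA_GET_MIB
 * control call; 64-bit counters arrive split across _hi/_lo register
 * pairs.
 */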
static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Our driver only supports device level stats */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	if (port != 0 || !stats)
		return -ENOSYS;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
	stats->value[IPREASMREQDS] = m.ipReasmReqds;
	stats->value[IPREASMOKS] = m.ipReasmOKs;
	stats->value[IPREASMFAILS] = m.ipReasmFails;
	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
	stats->value[TCPRTOMIN] = m.tcpRtoMin;
	stats->value[TCPRTOMAX] = m.tcpRtoMax;

	return stats->num_counters;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
			       size_t str_len)
{
	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	snprintf(str, str_len, "%s", info.fw_version);
}

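/*
 * Fill in the ib_device verb table, register with the RDMA core, then
 * create the sysfs attributes; the iw_cm_verbs table wires the iWARP
 * connection manager entry points to the iwch_cm implementation.
 */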
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
	dev->ibdev.get_hw_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;
	dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;
	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
	       sizeof(dev->ibdev.iwcm->ifname));

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}