blob: b47be87d5a53a4d8b649aade50fe2181a33c26b4 [file] [log] [blame]
Steve Wiseb038ced2007-02-12 16:16:18 -08001/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
Steve Wiseb038ced2007-02-12 16:16:18 -08003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/device.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/delay.h>
38#include <linux/errno.h>
39#include <linux/list.h>
Alexey Dobriyand43c36d2009-10-07 17:09:06 +040040#include <linux/sched.h>
Steve Wiseb038ced2007-02-12 16:16:18 -080041#include <linux/spinlock.h>
42#include <linux/ethtool.h>
Steve Wise7f049f22007-11-26 11:28:44 -060043#include <linux/rtnetlink.h>
Steve Wise7ab1a2b2009-05-27 14:42:36 -070044#include <linux/inetdevice.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090045#include <linux/slab.h>
Steve Wiseb038ced2007-02-12 16:16:18 -080046
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/byteorder.h>
50
51#include <rdma/iw_cm.h>
52#include <rdma/ib_verbs.h>
53#include <rdma/ib_smi.h>
Roland Dreierf7c6a7b2007-03-04 16:15:11 -080054#include <rdma/ib_umem.h>
Steve Wiseb038ced2007-02-12 16:16:18 -080055#include <rdma/ib_user_verbs.h>
56
57#include "cxio_hal.h"
58#include "iwch.h"
59#include "iwch_provider.h"
60#include "iwch_cm.h"
61#include "iwch_user.h"
Steve Wise14cc1802008-07-14 23:48:48 -070062#include "common.h"
Steve Wiseb038ced2007-02-12 16:16:18 -080063
/*
 * Address handles are an IB-transport concept; this iWARP device does not
 * implement them, so the verb is stubbed out with -ENOSYS.
 */
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
69
/* Counterpart of iwch_ah_create(): address handles are unsupported. */
static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}
74
/* Multicast group attach is not implemented by this device. */
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
79
/* Multicast group detach is not implemented by this device. */
static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
84
/*
 * MAD (management datagram) processing is not implemented by this device;
 * the verb exists only to satisfy the ib_device interface.
 */
static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}
98
99static int iwch_dealloc_ucontext(struct ib_ucontext *context)
100{
101 struct iwch_dev *rhp = to_iwch_dev(context->device);
102 struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
103 struct iwch_mm_entry *mm, *tmp;
104
Harvey Harrison33718362008-04-16 21:01:10 -0700105 PDBG("%s context %p\n", __func__, context);
Steve Wiseb038ced2007-02-12 16:16:18 -0800106 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
107 kfree(mm);
108 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
109 kfree(ucontext);
110 return 0;
111}
112
113static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
114 struct ib_udata *udata)
115{
116 struct iwch_ucontext *context;
117 struct iwch_dev *rhp = to_iwch_dev(ibdev);
118
Harvey Harrison33718362008-04-16 21:01:10 -0700119 PDBG("%s ibdev %p\n", __func__, ibdev);
Steve Wiseb038ced2007-02-12 16:16:18 -0800120 context = kzalloc(sizeof(*context), GFP_KERNEL);
121 if (!context)
122 return ERR_PTR(-ENOMEM);
123 cxio_init_ucontext(&rhp->rdev, &context->uctx);
124 INIT_LIST_HEAD(&context->mmaps);
125 spin_lock_init(&context->mmap_lock);
126 return &context->ibucontext;
127}
128
/*
 * Destroy a completion queue.  The CQID is unpublished first so no new
 * references can be taken, then we drop our own reference and wait for
 * all in-flight users (event/poll paths that bumped refcnt) to finish
 * before releasing the HW queue and the host memory.
 */
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	/* Drop the creation reference; wait until every other user is gone. */
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
144
/*
 * Create a completion queue.
 *
 * For user-mode CQs (ib_context != NULL) on non-T3A parts, the userspace
 * read-pointer address is taken from the request so iwch_arm_cq() can
 * read the consumer index directly from user memory.  The response tells
 * userspace the CQID, size, and an mmap key used to map the queue memory;
 * two response layouts exist (v0 for downlevel libcxgb3, current one with
 * memsize) selected by udata->outlen.
 *
 * Returns the new ib_cq or an ERR_PTR() on failure.
 */
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;	/* rate-limit the downlevel-library warning */
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	/* No CQ creation flags are supported. */
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			/* Userspace consumer index lives in user memory. */
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	/* The HW queue size is a power of two, stored as its log2. */
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	/* One reference for the creator; dropped in iwch_destroy_cq(). */
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		/* Record the mmap key -> physical address mapping for
		 * the later iwch_mmap() of the CQ memory. */
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			/* Old library: short (v0) response, no memsize. */
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			/* +1 CQE: presumably extra slot for the HW — the
			 * mapped length must cover it. */
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;	/* don't leak stack bytes */
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
253
/*
 * Resize a completion queue.
 *
 * The whole implementation is compiled out via "#ifdef notyet", so the
 * verb currently always returns -ENOSYS.  The disabled code sketches the
 * intended flow: allocate a bigger HW CQ, quiesce all QPs using this CQ,
 * copy the existing CQEs, swap queues while keeping the old CQID, resize
 * the HW context, then destroy the old queue memory.
 */
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				        sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
321
/*
 * Arm a CQ for the next completion notification (solicited-only or any).
 *
 * For user-mode CQs the consumer read pointer lives in userspace, so it is
 * fetched with get_user() *before* taking the CQ lock (get_user may fault
 * and sleep) and written into the HW shadow under the lock.  Note the two
 * branches each take the lock so that cxio_hal_cq_op() always runs with it
 * held.  A positive return from cxio_hal_cq_op() is only surfaced when the
 * caller asked for IB_CQ_REPORT_MISSED_EVENTS.
 */
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
354
/*
 * mmap handler for user contexts.
 *
 * The page offset encodes an mmap key that was handed to userspace when a
 * CQ/QP was created; the key is looked up (and consumed — remove_mmap())
 * to find the physical address to map.  Addresses inside the adapter's
 * user doorbell BAR are mapped uncached and write-only; anything else is
 * assumed to be WQ/CQ contiguous DMA memory and mapped normally.
 */
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	/* Each key is single-use: the entry is removed and freed here. */
	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		       rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		/* Doorbells: uncached, write-only, never copied or grown. */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
410
411static int iwch_deallocate_pd(struct ib_pd *pd)
412{
413 struct iwch_dev *rhp;
414 struct iwch_pd *php;
415
416 php = to_iwch_pd(pd);
417 rhp = php->rhp;
Harvey Harrison33718362008-04-16 21:01:10 -0700418 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
Steve Wiseb038ced2007-02-12 16:16:18 -0800419 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
420 kfree(php);
421 return 0;
422}
423
424static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
425 struct ib_ucontext *context,
426 struct ib_udata *udata)
427{
428 struct iwch_pd *php;
429 u32 pdid;
430 struct iwch_dev *rhp;
431
Harvey Harrison33718362008-04-16 21:01:10 -0700432 PDBG("%s ibdev %p\n", __func__, ibdev);
Steve Wiseb038ced2007-02-12 16:16:18 -0800433 rhp = (struct iwch_dev *) ibdev;
434 pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
435 if (!pdid)
436 return ERR_PTR(-EINVAL);
437 php = kzalloc(sizeof(*php), GFP_KERNEL);
438 if (!php) {
439 cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
440 return ERR_PTR(-ENOMEM);
441 }
442 php->pdid = pdid;
443 php->rhp = rhp;
444 if (context) {
445 if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
446 iwch_deallocate_pd(&php->ibpd);
447 return ERR_PTR(-EFAULT);
448 }
449 }
Harvey Harrison33718362008-04-16 21:01:10 -0700450 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
Steve Wiseb038ced2007-02-12 16:16:18 -0800451 return &php->ibpd;
452}
453
/*
 * Deregister a memory region.
 *
 * Releases, in order: the fast-reg page array (NULL for non-fastreg MRs —
 * kfree(NULL) is a no-op), the adapter TPT/PBL state, the id mapping, the
 * optional kernel-virtual buffer, and the pinned user memory (only set by
 * iwch_reg_user_mr()).  Finally frees the MR wrapper itself.
 */
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;	/* mmid is the stag's index part */
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
478
/*
 * Create a DMA MR covering the full 32-bit address space (T3 MRs are
 * limited to 32 bits of length, hence the platform check below).  The
 * region is described by a PBL of 64MB (1 << 26) "pages" whose entries
 * are simply the identity mapping of physical addresses.
 */
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	/* mask is only used in the debug print below. */
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;	/* shift 26 => 64MB PBL entries */

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	/* Identity map: entry i covers physical address i << shift. */
	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	PDBG("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;	/* HW encodes page size as log2 - 12 */

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
556
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800557static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
558 u64 virt, int acc, struct ib_udata *udata)
Steve Wiseb038ced2007-02-12 16:16:18 -0800559{
560 __be64 *pages;
561 int shift, n, len;
Yishai Hadaseeb84612014-01-28 13:40:15 +0200562 int i, k, entry;
Steve Wiseb038ced2007-02-12 16:16:18 -0800563 int err = 0;
Steve Wiseb038ced2007-02-12 16:16:18 -0800564 struct iwch_dev *rhp;
565 struct iwch_pd *php;
566 struct iwch_mr *mhp;
567 struct iwch_reg_user_mr_resp uresp;
Yishai Hadaseeb84612014-01-28 13:40:15 +0200568 struct scatterlist *sg;
Harvey Harrison33718362008-04-16 21:01:10 -0700569 PDBG("%s ib_pd %p\n", __func__, pd);
Steve Wiseb038ced2007-02-12 16:16:18 -0800570
571 php = to_iwch_pd(pd);
572 rhp = php->rhp;
573 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
574 if (!mhp)
575 return ERR_PTR(-ENOMEM);
576
Roland Dreier273748c2008-05-06 15:56:22 -0700577 mhp->rhp = rhp;
578
Arthur Kepnercb9fbc52008-04-29 01:00:34 -0700579 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800580 if (IS_ERR(mhp->umem)) {
581 err = PTR_ERR(mhp->umem);
582 kfree(mhp);
583 return ERR_PTR(err);
584 }
585
586 shift = ffs(mhp->umem->page_size) - 1;
587
Yishai Hadaseeb84612014-01-28 13:40:15 +0200588 n = mhp->umem->nmap;
Steve Wiseb038ced2007-02-12 16:16:18 -0800589
Roland Dreier273748c2008-05-06 15:56:22 -0700590 err = iwch_alloc_pbl(mhp, n);
591 if (err)
592 goto err;
593
594 pages = (__be64 *) __get_free_page(GFP_KERNEL);
Steve Wiseb038ced2007-02-12 16:16:18 -0800595 if (!pages) {
596 err = -ENOMEM;
Roland Dreier273748c2008-05-06 15:56:22 -0700597 goto err_pbl;
Steve Wiseb038ced2007-02-12 16:16:18 -0800598 }
599
Steve Wiseb038ced2007-02-12 16:16:18 -0800600 i = n = 0;
601
Yishai Hadaseeb84612014-01-28 13:40:15 +0200602 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
603 len = sg_dma_len(sg) >> shift;
Steve Wiseb038ced2007-02-12 16:16:18 -0800604 for (k = 0; k < len; ++k) {
Yishai Hadaseeb84612014-01-28 13:40:15 +0200605 pages[i++] = cpu_to_be64(sg_dma_address(sg) +
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800606 mhp->umem->page_size * k);
Roland Dreier273748c2008-05-06 15:56:22 -0700607 if (i == PAGE_SIZE / sizeof *pages) {
608 err = iwch_write_pbl(mhp, pages, i, n);
609 if (err)
610 goto pbl_done;
611 n += i;
612 i = 0;
613 }
Steve Wiseb038ced2007-02-12 16:16:18 -0800614 }
Yishai Hadaseeb84612014-01-28 13:40:15 +0200615 }
Steve Wiseb038ced2007-02-12 16:16:18 -0800616
Roland Dreier273748c2008-05-06 15:56:22 -0700617 if (i)
618 err = iwch_write_pbl(mhp, pages, i, n);
619
620pbl_done:
621 free_page((unsigned long) pages);
622 if (err)
623 goto err_pbl;
624
Steve Wiseb038ced2007-02-12 16:16:18 -0800625 mhp->attr.pdid = php->pdid;
626 mhp->attr.zbva = 0;
Steve Wisee64518f2007-03-06 14:44:07 -0600627 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800628 mhp->attr.va_fbo = virt;
Steve Wiseb038ced2007-02-12 16:16:18 -0800629 mhp->attr.page_size = shift - 12;
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800630 mhp->attr.len = (u32) length;
Roland Dreier273748c2008-05-06 15:56:22 -0700631
632 err = iwch_register_mem(rhp, php, mhp, shift);
Steve Wiseb038ced2007-02-12 16:16:18 -0800633 if (err)
Roland Dreier273748c2008-05-06 15:56:22 -0700634 goto err_pbl;
Steve Wiseb038ced2007-02-12 16:16:18 -0800635
Steve Wise8176d292008-01-24 16:30:16 -0600636 if (udata && !t3a_device(rhp)) {
Steve Wiseb038ced2007-02-12 16:16:18 -0800637 uresp.pbl_addr = (mhp->attr.pbl_addr -
Roland Dreier273748c2008-05-06 15:56:22 -0700638 rhp->rdev.rnic_info.pbl_base) >> 3;
Harvey Harrison33718362008-04-16 21:01:10 -0700639 PDBG("%s user resp pbl_addr 0x%x\n", __func__,
Steve Wiseb038ced2007-02-12 16:16:18 -0800640 uresp.pbl_addr);
641
642 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
643 iwch_dereg_mr(&mhp->ibmr);
644 err = -EFAULT;
645 goto err;
646 }
647 }
648
649 return &mhp->ibmr;
650
Roland Dreier273748c2008-05-06 15:56:22 -0700651err_pbl:
652 iwch_free_pbl(mhp);
653
Steve Wiseb038ced2007-02-12 16:16:18 -0800654err:
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800655 ib_umem_release(mhp->umem);
Steve Wiseb038ced2007-02-12 16:16:18 -0800656 kfree(mhp);
657 return ERR_PTR(err);
658}
659
Matan Barakb2a239d2016-02-29 18:05:29 +0200660static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
661 struct ib_udata *udata)
Steve Wiseb038ced2007-02-12 16:16:18 -0800662{
663 struct iwch_dev *rhp;
664 struct iwch_pd *php;
665 struct iwch_mw *mhp;
666 u32 mmid;
667 u32 stag = 0;
668 int ret;
669
Shani Michaeli7083e422013-02-06 16:19:12 +0000670 if (type != IB_MW_TYPE_1)
671 return ERR_PTR(-EINVAL);
672
Steve Wiseb038ced2007-02-12 16:16:18 -0800673 php = to_iwch_pd(pd);
674 rhp = php->rhp;
675 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
676 if (!mhp)
677 return ERR_PTR(-ENOMEM);
678 ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
679 if (ret) {
680 kfree(mhp);
681 return ERR_PTR(ret);
682 }
683 mhp->rhp = rhp;
684 mhp->attr.pdid = php->pdid;
685 mhp->attr.type = TPT_MW;
686 mhp->attr.stag = stag;
687 mmid = (stag) >> 8;
Steve Wise70fe1792008-07-14 23:48:49 -0700688 mhp->ibmw.rkey = stag;
Steve Wise13a23932009-09-09 11:25:55 -0700689 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
690 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
691 kfree(mhp);
692 return ERR_PTR(-ENOMEM);
693 }
Harvey Harrison33718362008-04-16 21:01:10 -0700694 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
Steve Wiseb038ced2007-02-12 16:16:18 -0800695 return &(mhp->ibmw);
696}
697
698static int iwch_dealloc_mw(struct ib_mw *mw)
699{
700 struct iwch_dev *rhp;
701 struct iwch_mw *mhp;
702 u32 mmid;
703
704 mhp = to_iwch_mw(mw);
705 rhp = mhp->rhp;
706 mmid = (mw->rkey) >> 8;
707 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
708 remove_handle(rhp, &rhp->mmidr, mmid);
Harvey Harrison33718362008-04-16 21:01:10 -0700709 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
Jesper Juhlfe194f12013-01-14 20:34:09 +0100710 kfree(mhp);
Steve Wiseb038ced2007-02-12 16:16:18 -0800711 return 0;
712}
713
Sagi Grimbergf683d3b2015-07-30 10:32:45 +0300714static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
715 enum ib_mr_type mr_type,
716 u32 max_num_sg)
Steve Wisee7e55822008-07-14 23:48:45 -0700717{
718 struct iwch_dev *rhp;
719 struct iwch_pd *php;
720 struct iwch_mr *mhp;
721 u32 mmid;
722 u32 stag = 0;
Steve Wise13a23932009-09-09 11:25:55 -0700723 int ret = 0;
Steve Wisee7e55822008-07-14 23:48:45 -0700724
Sagi Grimbergf683d3b2015-07-30 10:32:45 +0300725 if (mr_type != IB_MR_TYPE_MEM_REG ||
726 max_num_sg > T3_MAX_FASTREG_DEPTH)
727 return ERR_PTR(-EINVAL);
728
Steve Wisee7e55822008-07-14 23:48:45 -0700729 php = to_iwch_pd(pd);
730 rhp = php->rhp;
731 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
732 if (!mhp)
Steve Wise13a23932009-09-09 11:25:55 -0700733 goto err;
Steve Wisee7e55822008-07-14 23:48:45 -0700734
Sagi Grimberg14fb4172015-10-13 19:11:29 +0300735 mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
736 if (!mhp->pages) {
737 ret = -ENOMEM;
738 goto pl_err;
739 }
740
Steve Wisee7e55822008-07-14 23:48:45 -0700741 mhp->rhp = rhp;
Sagi Grimbergf683d3b2015-07-30 10:32:45 +0300742 ret = iwch_alloc_pbl(mhp, max_num_sg);
Steve Wise13a23932009-09-09 11:25:55 -0700743 if (ret)
744 goto err1;
Sagi Grimbergf683d3b2015-07-30 10:32:45 +0300745 mhp->attr.pbl_size = max_num_sg;
Steve Wisee7e55822008-07-14 23:48:45 -0700746 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
747 mhp->attr.pbl_size, mhp->attr.pbl_addr);
Steve Wise13a23932009-09-09 11:25:55 -0700748 if (ret)
749 goto err2;
Steve Wisee7e55822008-07-14 23:48:45 -0700750 mhp->attr.pdid = php->pdid;
751 mhp->attr.type = TPT_NON_SHARED_MR;
752 mhp->attr.stag = stag;
753 mhp->attr.state = 1;
754 mmid = (stag) >> 8;
755 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
Steve Wise13a23932009-09-09 11:25:55 -0700756 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
757 goto err3;
758
Steve Wisee7e55822008-07-14 23:48:45 -0700759 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
760 return &(mhp->ibmr);
Steve Wise13a23932009-09-09 11:25:55 -0700761err3:
762 cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
763 mhp->attr.pbl_addr);
764err2:
765 iwch_free_pbl(mhp);
766err1:
Sagi Grimberg14fb4172015-10-13 19:11:29 +0300767 kfree(mhp->pages);
768pl_err:
Steve Wise13a23932009-09-09 11:25:55 -0700769 kfree(mhp);
770err:
771 return ERR_PTR(ret);
Steve Wisee7e55822008-07-14 23:48:45 -0700772}
773
Sagi Grimberg14fb4172015-10-13 19:11:29 +0300774static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
775{
776 struct iwch_mr *mhp = to_iwch_mr(ibmr);
777
778 if (unlikely(mhp->npages == mhp->attr.pbl_size))
779 return -ENOMEM;
780
781 mhp->pages[mhp->npages++] = addr;
782
783 return 0;
784}
785
Christoph Hellwigff2ba992016-05-03 18:01:04 +0200786static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
Bart Van Assche9aa8b322016-05-12 10:49:15 -0700787 int sg_nents, unsigned int *sg_offset)
Sagi Grimberg14fb4172015-10-13 19:11:29 +0300788{
789 struct iwch_mr *mhp = to_iwch_mr(ibmr);
790
791 mhp->npages = 0;
792
Christoph Hellwigff2ba992016-05-03 18:01:04 +0200793 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
Sagi Grimberg14fb4172015-10-13 19:11:29 +0300794}
795
/*
 * Destroy a queue pair.
 *
 * The QP is first driven to ERROR so the hardware stops processing, and
 * we wait for any connection endpoint reference (qhp->ep) to be dropped.
 * The QPID is then unpublished, the creation reference released, and we
 * wait for all remaining users before freeing the HW queue (against the
 * owning user context's resources, or the kernel's) and the host memory.
 */
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
825
/*
 * iwch_create_qp - create an RC queue pair on the T3 adapter.
 *
 * Validates the requested capabilities, sizes the SQ/RQ/WQ (all rounded up
 * to powers of two because the queue macros assume that), allocates the
 * hardware QP, and — for userspace QPs — returns mmap keys for the WQ
 * memory and doorbell page via udata.
 *
 * Returns the new ib_qp on success or an ERR_PTR on failure.
 */
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	/* T3 only supports RC QPs */
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	/* Both CQs must already be known to this device */
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	/* Allocate the hardware WQ; kernel QPs use the device's own uctx */
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	/* Report the actual capabilities back to the caller */
	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time. The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	/* Make the QP visible by qpid; unwind the HW QP on failure */
	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		/*
		 * Userspace QP: hand back two mmap cookies, one for the WQ
		 * memory and one for the doorbell page.  From this point on
		 * iwch_destroy_qp() does the full unwind.
		 */
		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		/* mmap_lock serializes allocation of per-context mmap keys */
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
983
984static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
985 int attr_mask, struct ib_udata *udata)
986{
987 struct iwch_dev *rhp;
988 struct iwch_qp *qhp;
989 enum iwch_qp_attr_mask mask = 0;
990 struct iwch_qp_attributes attrs;
991
Harvey Harrison33718362008-04-16 21:01:10 -0700992 PDBG("%s ib_qp %p\n", __func__, ibqp);
Steve Wiseb038ced2007-02-12 16:16:18 -0800993
994 /* iwarp does not support the RTR state */
995 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
996 attr_mask &= ~IB_QP_STATE;
997
998 /* Make sure we still have something left to do */
999 if (!attr_mask)
1000 return 0;
1001
1002 memset(&attrs, 0, sizeof attrs);
1003 qhp = to_iwch_qp(ibqp);
1004 rhp = qhp->rhp;
1005
1006 attrs.next_state = iwch_convert_state(attr->qp_state);
1007 attrs.enable_rdma_read = (attr->qp_access_flags &
1008 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1009 attrs.enable_rdma_write = (attr->qp_access_flags &
1010 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1011 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1012
1013
1014 mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
1015 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1016 (IWCH_QP_ATTR_ENABLE_RDMA_READ |
1017 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
1018 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1019
1020 return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
1021}
1022
1023void iwch_qp_add_ref(struct ib_qp *qp)
1024{
Harvey Harrison33718362008-04-16 21:01:10 -07001025 PDBG("%s ib_qp %p\n", __func__, qp);
Steve Wiseb038ced2007-02-12 16:16:18 -08001026 atomic_inc(&(to_iwch_qp(qp)->refcnt));
1027}
1028
1029void iwch_qp_rem_ref(struct ib_qp *qp)
1030{
Harvey Harrison33718362008-04-16 21:01:10 -07001031 PDBG("%s ib_qp %p\n", __func__, qp);
Steve Wiseb038ced2007-02-12 16:16:18 -08001032 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
1033 wake_up(&(to_iwch_qp(qp)->wait));
1034}
1035
Adrian Bunk2b540352007-02-21 11:52:49 +01001036static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
Steve Wiseb038ced2007-02-12 16:16:18 -08001037{
Harvey Harrison33718362008-04-16 21:01:10 -07001038 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
Steve Wiseb038ced2007-02-12 16:16:18 -08001039 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
1040}
1041
1042
1043static int iwch_query_pkey(struct ib_device *ibdev,
1044 u8 port, u16 index, u16 * pkey)
1045{
Harvey Harrison33718362008-04-16 21:01:10 -07001046 PDBG("%s ibdev %p\n", __func__, ibdev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001047 *pkey = 0;
1048 return 0;
1049}
1050
1051static int iwch_query_gid(struct ib_device *ibdev, u8 port,
1052 int index, union ib_gid *gid)
1053{
1054 struct iwch_dev *dev;
1055
1056 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
Harvey Harrison33718362008-04-16 21:01:10 -07001057 __func__, ibdev, port, index, gid);
Steve Wiseb038ced2007-02-12 16:16:18 -08001058 dev = to_iwch_dev(ibdev);
1059 BUG_ON(port == 0 || port > 2);
1060 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
1061 memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
1062 return 0;
1063}
1064
Steve Wise97d1cc82008-07-14 23:48:47 -07001065static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
1066{
1067 struct ethtool_drvinfo info;
1068 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1069 char *cp, *next;
1070 unsigned fw_maj, fw_min, fw_mic;
1071
Steve Wise97d1cc82008-07-14 23:48:47 -07001072 lldev->ethtool_ops->get_drvinfo(lldev, &info);
Steve Wise97d1cc82008-07-14 23:48:47 -07001073
1074 next = info.fw_version + 1;
1075 cp = strsep(&next, ".");
1076 sscanf(cp, "%i", &fw_maj);
1077 cp = strsep(&next, ".");
1078 sscanf(cp, "%i", &fw_min);
1079 cp = strsep(&next, ".");
1080 sscanf(cp, "%i", &fw_mic);
1081
1082 return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
1083 (fw_mic & 0xffff);
1084}
1085
Matan Barak2528e332015-06-11 16:35:25 +03001086static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1087 struct ib_udata *uhw)
Steve Wiseb038ced2007-02-12 16:16:18 -08001088{
1089
1090 struct iwch_dev *dev;
Matan Barak2528e332015-06-11 16:35:25 +03001091
Harvey Harrison33718362008-04-16 21:01:10 -07001092 PDBG("%s ibdev %p\n", __func__, ibdev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001093
Matan Barak2528e332015-06-11 16:35:25 +03001094 if (uhw->inlen || uhw->outlen)
1095 return -EINVAL;
1096
Steve Wiseb038ced2007-02-12 16:16:18 -08001097 dev = to_iwch_dev(ibdev);
1098 memset(props, 0, sizeof *props);
1099 memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
Steve Wise97d1cc82008-07-14 23:48:47 -07001100 props->hw_ver = dev->rdev.t3cdev_p->type;
1101 props->fw_ver = fw_vers_string_to_u64(dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001102 props->device_cap_flags = dev->device_cap_flags;
Jon Mason52c80842008-07-14 23:48:49 -07001103 props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
Steve Wiseb038ced2007-02-12 16:16:18 -08001104 props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
1105 props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
Steve Wiseccaf10d2008-04-29 13:46:52 -07001106 props->max_mr_size = dev->attr.max_mr_size;
Steve Wiseb038ced2007-02-12 16:16:18 -08001107 props->max_qp = dev->attr.max_qps;
1108 props->max_qp_wr = dev->attr.max_wrs;
1109 props->max_sge = dev->attr.max_sge_per_wr;
1110 props->max_sge_rd = 1;
1111 props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
Steve Wise9a766642007-11-09 09:21:58 -06001112 props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
Steve Wiseb038ced2007-02-12 16:16:18 -08001113 props->max_cq = dev->attr.max_cqs;
1114 props->max_cqe = dev->attr.max_cqes_per_cq;
1115 props->max_mr = dev->attr.max_mem_regs;
1116 props->max_pd = dev->attr.max_pds;
1117 props->local_ca_ack_delay = 0;
Steve Wisee7e55822008-07-14 23:48:45 -07001118 props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
Steve Wiseb038ced2007-02-12 16:16:18 -08001119
1120 return 0;
1121}
1122
1123static int iwch_query_port(struct ib_device *ibdev,
1124 u8 port, struct ib_port_attr *props)
1125{
Steve Wise7ab1a2b2009-05-27 14:42:36 -07001126 struct iwch_dev *dev;
1127 struct net_device *netdev;
1128 struct in_device *inetdev;
1129
Harvey Harrison33718362008-04-16 21:01:10 -07001130 PDBG("%s ibdev %p\n", __func__, ibdev);
Jon Masonc752c782008-09-30 14:51:19 -07001131
Steve Wise7ab1a2b2009-05-27 14:42:36 -07001132 dev = to_iwch_dev(ibdev);
1133 netdev = dev->rdev.port_info.lldevs[port-1];
1134
Jon Masonc752c782008-09-30 14:51:19 -07001135 memset(props, 0, sizeof(struct ib_port_attr));
Steve Wiseb038ced2007-02-12 16:16:18 -08001136 props->max_mtu = IB_MTU_4096;
Steve Wise7ab1a2b2009-05-27 14:42:36 -07001137 if (netdev->mtu >= 4096)
1138 props->active_mtu = IB_MTU_4096;
1139 else if (netdev->mtu >= 2048)
1140 props->active_mtu = IB_MTU_2048;
1141 else if (netdev->mtu >= 1024)
1142 props->active_mtu = IB_MTU_1024;
1143 else if (netdev->mtu >= 512)
1144 props->active_mtu = IB_MTU_512;
1145 else
1146 props->active_mtu = IB_MTU_256;
1147
1148 if (!netif_carrier_ok(netdev))
1149 props->state = IB_PORT_DOWN;
1150 else {
1151 inetdev = in_dev_get(netdev);
Steve Wisee5da4ed2009-10-07 15:51:07 -07001152 if (inetdev) {
1153 if (inetdev->ifa_list)
1154 props->state = IB_PORT_ACTIVE;
1155 else
1156 props->state = IB_PORT_INIT;
1157 in_dev_put(inetdev);
1158 } else
Steve Wise7ab1a2b2009-05-27 14:42:36 -07001159 props->state = IB_PORT_INIT;
Steve Wise7ab1a2b2009-05-27 14:42:36 -07001160 }
1161
Steve Wiseb038ced2007-02-12 16:16:18 -08001162 props->port_cap_flags =
1163 IB_PORT_CM_SUP |
1164 IB_PORT_SNMP_TUNNEL_SUP |
1165 IB_PORT_REINIT_SUP |
1166 IB_PORT_DEVICE_MGMT_SUP |
1167 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1168 props->gid_tbl_len = 1;
1169 props->pkey_tbl_len = 1;
Steve Wiseb038ced2007-02-12 16:16:18 -08001170 props->active_width = 2;
Or Gerlitz2e966912012-02-28 18:49:50 +02001171 props->active_speed = IB_SPEED_DDR;
Steve Wiseb038ced2007-02-12 16:16:18 -08001172 props->max_msg_sz = -1;
1173
1174 return 0;
1175}
1176
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001177static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
1178 char *buf)
Steve Wiseb038ced2007-02-12 16:16:18 -08001179{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001180 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1181 ibdev.dev);
1182 PDBG("%s dev 0x%p\n", __func__, dev);
1183 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
Steve Wiseb038ced2007-02-12 16:16:18 -08001184}
1185
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001186static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
1187 char *buf)
Steve Wiseb038ced2007-02-12 16:16:18 -08001188{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001189 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1190 ibdev.dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001191 struct ethtool_drvinfo info;
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001192 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
Steve Wiseb038ced2007-02-12 16:16:18 -08001193
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001194 PDBG("%s dev 0x%p\n", __func__, dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001195 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1196 return sprintf(buf, "%s\n", info.driver);
1197}
1198
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001199static ssize_t show_board(struct device *dev, struct device_attribute *attr,
1200 char *buf)
Steve Wiseb038ced2007-02-12 16:16:18 -08001201{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001202 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1203 ibdev.dev);
1204 PDBG("%s dev 0x%p\n", __func__, dev);
1205 return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
1206 iwch_dev->rdev.rnic_info.pdev->device);
Steve Wiseb038ced2007-02-12 16:16:18 -08001207}
1208
Christoph Lameterb40f4752016-05-16 12:49:33 -05001209enum counters {
1210 IPINRECEIVES,
1211 IPINHDRERRORS,
1212 IPINADDRERRORS,
1213 IPINUNKNOWNPROTOS,
1214 IPINDISCARDS,
1215 IPINDELIVERS,
1216 IPOUTREQUESTS,
1217 IPOUTDISCARDS,
1218 IPOUTNOROUTES,
1219 IPREASMTIMEOUT,
1220 IPREASMREQDS,
1221 IPREASMOKS,
1222 IPREASMFAILS,
1223 TCPACTIVEOPENS,
1224 TCPPASSIVEOPENS,
1225 TCPATTEMPTFAILS,
1226 TCPESTABRESETS,
1227 TCPCURRESTAB,
1228 TCPINSEGS,
1229 TCPOUTSEGS,
1230 TCPRETRANSSEGS,
1231 TCPINERRS,
1232 TCPOUTRSTS,
1233 TCPRTOMIN,
1234 TCPRTOMAX,
1235 NR_COUNTERS
1236};
1237
/*
 * Human-readable counter names exposed via the rdma_hw_stats interface,
 * indexed by enum counters (standard IP/TCP MIB names).
 */
static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};
1265
1266static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
1267 u8 port_num)
1268{
1269 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
1270
1271 /* Our driver only supports device level stats */
1272 if (port_num != 0)
1273 return NULL;
1274
1275 return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
1276 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1277}
1278
1279static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1280 u8 port, int index)
Steve Wise14cc1802008-07-14 23:48:48 -07001281{
1282 struct iwch_dev *dev;
1283 struct tp_mib_stats m;
1284 int ret;
1285
Christoph Lameterb40f4752016-05-16 12:49:33 -05001286 if (port != 0 || !stats)
1287 return -ENOSYS;
1288
Steve Wise14cc1802008-07-14 23:48:48 -07001289 PDBG("%s ibdev %p\n", __func__, ibdev);
1290 dev = to_iwch_dev(ibdev);
1291 ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
1292 if (ret)
1293 return -ENOSYS;
1294
Christoph Lameterb40f4752016-05-16 12:49:33 -05001295 stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
1296 stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
1297 stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
1298 stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
1299 stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
1300 stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
1301 stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
1302 stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
1303 stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
1304 stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
1305 stats->value[IPREASMREQDS] = m.ipReasmReqds;
1306 stats->value[IPREASMOKS] = m.ipReasmOKs;
1307 stats->value[IPREASMFAILS] = m.ipReasmFails;
1308 stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
1309 stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
1310 stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
1311 stats->value[TCPESTABRESETS] = m.tcpEstabResets;
1312 stats->value[TCPCURRESTAB] = m.tcpOutRsts;
1313 stats->value[TCPINSEGS] = m.tcpCurrEstab;
1314 stats->value[TCPOUTSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
1315 stats->value[TCPRETRANSSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
1316 stats->value[TCPINERRS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo,
1317 stats->value[TCPOUTRSTS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
1318 stats->value[TCPRTOMIN] = m.tcpRtoMin;
1319 stats->value[TCPRTOMAX] = m.tcpRtoMax;
1320
1321 return stats->num_counters;
Steve Wise14cc1802008-07-14 23:48:48 -07001322}
1323
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001324static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001325static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1326static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Steve Wiseb038ced2007-02-12 16:16:18 -08001327
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001328static struct device_attribute *iwch_class_attributes[] = {
1329 &dev_attr_hw_rev,
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001330 &dev_attr_hca_type,
Steve Wise14cc1802008-07-14 23:48:48 -07001331 &dev_attr_board_id,
Steve Wiseb038ced2007-02-12 16:16:18 -08001332};
1333
Ira Weiny77386132015-05-13 20:02:58 -04001334static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
1335 struct ib_port_immutable *immutable)
1336{
1337 struct ib_port_attr attr;
1338 int err;
1339
1340 err = iwch_query_port(ibdev, port_num, &attr);
1341 if (err)
1342 return err;
1343
1344 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1345 immutable->gid_tbl_len = attr.gid_tbl_len;
Ira Weinyf9b22e32015-05-13 20:02:59 -04001346 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
Ira Weiny77386132015-05-13 20:02:58 -04001347
1348 return 0;
1349}
1350
Ira Weinye1803692016-06-15 02:21:57 -04001351static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
1352 size_t str_len)
1353{
1354 struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
1355 struct ethtool_drvinfo info;
1356 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1357
1358 PDBG("%s dev 0x%p\n", __func__, iwch_dev);
1359 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1360 snprintf(str, str_len, "%s", info.fw_version);
1361}
1362
Steve Wiseb038ced2007-02-12 16:16:18 -08001363int iwch_register_device(struct iwch_dev *dev)
1364{
1365 int ret;
1366 int i;
1367
Harvey Harrison33718362008-04-16 21:01:10 -07001368 PDBG("%s iwch_dev %p\n", __func__, dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001369 strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
1370 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1371 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1372 dev->ibdev.owner = THIS_MODULE;
Steve Wisebe433242008-08-04 11:08:37 -07001373 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1374 IB_DEVICE_MEM_WINDOW |
1375 IB_DEVICE_MEM_MGT_EXTENSIONS;
Steve Wise96f15c02008-07-14 23:48:53 -07001376
1377 /* cxgb3 supports STag 0. */
1378 dev->ibdev.local_dma_lkey = 0;
Steve Wiseb038ced2007-02-12 16:16:18 -08001379
1380 dev->ibdev.uverbs_cmd_mask =
1381 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1382 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1383 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1384 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1385 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1386 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1387 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1388 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1389 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1390 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1391 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1392 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1393 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1394 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1395 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1396 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1397 (1ull << IB_USER_VERBS_CMD_POST_RECV);
1398 dev->ibdev.node_type = RDMA_NODE_RNIC;
Yuval Shaiabd99fde2016-08-25 10:57:07 -07001399 BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
Steve Wiseb038ced2007-02-12 16:16:18 -08001400 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
1401 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
Michael S. Tsirkinf4fd0b22007-05-03 13:48:47 +03001402 dev->ibdev.num_comp_vectors = 1;
Steve Wiseb038ced2007-02-12 16:16:18 -08001403 dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001404 dev->ibdev.query_device = iwch_query_device;
1405 dev->ibdev.query_port = iwch_query_port;
Steve Wiseb038ced2007-02-12 16:16:18 -08001406 dev->ibdev.query_pkey = iwch_query_pkey;
1407 dev->ibdev.query_gid = iwch_query_gid;
1408 dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
1409 dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
1410 dev->ibdev.mmap = iwch_mmap;
1411 dev->ibdev.alloc_pd = iwch_allocate_pd;
1412 dev->ibdev.dealloc_pd = iwch_deallocate_pd;
1413 dev->ibdev.create_ah = iwch_ah_create;
1414 dev->ibdev.destroy_ah = iwch_ah_destroy;
1415 dev->ibdev.create_qp = iwch_create_qp;
1416 dev->ibdev.modify_qp = iwch_ib_modify_qp;
1417 dev->ibdev.destroy_qp = iwch_destroy_qp;
1418 dev->ibdev.create_cq = iwch_create_cq;
1419 dev->ibdev.destroy_cq = iwch_destroy_cq;
1420 dev->ibdev.resize_cq = iwch_resize_cq;
1421 dev->ibdev.poll_cq = iwch_poll_cq;
1422 dev->ibdev.get_dma_mr = iwch_get_dma_mr;
Steve Wiseb038ced2007-02-12 16:16:18 -08001423 dev->ibdev.reg_user_mr = iwch_reg_user_mr;
1424 dev->ibdev.dereg_mr = iwch_dereg_mr;
1425 dev->ibdev.alloc_mw = iwch_alloc_mw;
Steve Wiseb038ced2007-02-12 16:16:18 -08001426 dev->ibdev.dealloc_mw = iwch_dealloc_mw;
Sagi Grimbergf683d3b2015-07-30 10:32:45 +03001427 dev->ibdev.alloc_mr = iwch_alloc_mr;
Sagi Grimberg14fb4172015-10-13 19:11:29 +03001428 dev->ibdev.map_mr_sg = iwch_map_mr_sg;
Steve Wiseb038ced2007-02-12 16:16:18 -08001429 dev->ibdev.attach_mcast = iwch_multicast_attach;
1430 dev->ibdev.detach_mcast = iwch_multicast_detach;
1431 dev->ibdev.process_mad = iwch_process_mad;
Steve Wiseb038ced2007-02-12 16:16:18 -08001432 dev->ibdev.req_notify_cq = iwch_arm_cq;
1433 dev->ibdev.post_send = iwch_post_send;
1434 dev->ibdev.post_recv = iwch_post_receive;
Christoph Lameterb40f4752016-05-16 12:49:33 -05001435 dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
1436 dev->ibdev.get_hw_stats = iwch_get_mib;
Steve Wiseb9551502010-10-21 12:37:06 +00001437 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
Ira Weiny77386132015-05-13 20:02:58 -04001438 dev->ibdev.get_port_immutable = iwch_port_immutable;
Ira Weinye1803692016-06-15 02:21:57 -04001439 dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
Steve Wiseb038ced2007-02-12 16:16:18 -08001440
WANG Cong6abb6ea2007-07-09 20:12:26 -07001441 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
1442 if (!dev->ibdev.iwcm)
1443 return -ENOMEM;
1444
Steve Wiseb038ced2007-02-12 16:16:18 -08001445 dev->ibdev.iwcm->connect = iwch_connect;
1446 dev->ibdev.iwcm->accept = iwch_accept_cr;
1447 dev->ibdev.iwcm->reject = iwch_reject_cr;
1448 dev->ibdev.iwcm->create_listen = iwch_create_listen;
1449 dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
1450 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1451 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1452 dev->ibdev.iwcm->get_qp = iwch_get_qp;
Steve Wisead202342016-04-12 06:55:01 -07001453 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1454 sizeof(dev->ibdev.iwcm->ifname));
Steve Wiseb038ced2007-02-12 16:16:18 -08001455
Ralph Campbell9a6edb62010-05-06 17:03:25 -07001456 ret = ib_register_device(&dev->ibdev, NULL);
Steve Wiseb038ced2007-02-12 16:16:18 -08001457 if (ret)
1458 goto bail1;
1459
1460 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001461 ret = device_create_file(&dev->ibdev.dev,
1462 iwch_class_attributes[i]);
Steve Wiseb038ced2007-02-12 16:16:18 -08001463 if (ret) {
1464 goto bail2;
1465 }
1466 }
1467 return 0;
1468bail2:
1469 ib_unregister_device(&dev->ibdev);
1470bail1:
Steve Wise3793d2f2009-09-05 20:22:36 -07001471 kfree(dev->ibdev.iwcm);
Steve Wiseb038ced2007-02-12 16:16:18 -08001472 return ret;
1473}
1474
1475void iwch_unregister_device(struct iwch_dev *dev)
1476{
1477 int i;
1478
Harvey Harrison33718362008-04-16 21:01:10 -07001479 PDBG("%s iwch_dev %p\n", __func__, dev);
Steve Wiseb038ced2007-02-12 16:16:18 -08001480 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001481 device_remove_file(&dev->ibdev.dev,
1482 iwch_class_attributes[i]);
Steve Wiseb038ced2007-02-12 16:16:18 -08001483 ib_unregister_device(&dev->ibdev);
Steve Wise3793d2f2009-09-05 20:22:36 -07001484 kfree(dev->ibdev.iwcm);
Steve Wiseb038ced2007-02-12 16:16:18 -08001485 return;
1486}