/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp - iwdev->used_qps;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
	props->max_pd = iwdev->max_pd - iwdev->used_pds;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
	return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning port attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	memset(props, 0, sizeof(*props));

	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);

	if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
		i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
			     req.userspace_ver, I40IW_ABI_USERSPACE_VER);
		return ERR_PTR(-EINVAL);
	}

	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = I40IW_ABI_KERNEL_VER;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

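	/*
	 * Offset zero (after the adjustment above) maps the doorbell
	 * page, which must be uncached.  All other offsets map push
	 * pages: relative to the push base, odd pages are mapped
	 * uncached and even pages write-combined, as selected below.
	 */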
	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

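	/* Map a single page of BAR 0 at the computed offset into user space */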
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

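	/*
	 * Take an extra reference so the request and its completion
	 * info stay valid until op_ret_val is read below.
	 */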
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);

	if (context) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;
error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}

/**
 * i40iw_qp_roundup - return round up qp ring size
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;

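	/*
	 * Classic round-up to the next power of two: decrement, then OR
	 * the value with itself shifted right by 1, 2, 4, 8 and 16 so
	 * every bit below the highest set bit becomes 1, then increment.
	 * For example, 1000 smears to 1023 and rounds up to 1024.
	 */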
	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}

/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

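	/* A hit is removed from the list: the caller takes ownership */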
	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift, rqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	if (!status)
		status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);

	if (status)
		return -ENOMEM;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << rqshift;

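	/*
	 * One allocation backs both work-request id arrays: sqdepth
	 * SQ tracking entries followed by rqdepth u64 RQ wrids.
	 */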
	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

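	/*
	 * The SQ, the RQ and the shadow area share one contiguous DMA
	 * allocation, laid out in that order below.
	 */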
	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -EINVAL;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);
	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_data\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				spin_lock_irqsave(
				    &ucontext->qp_reg_mem_list_lock, flags);
				iwqp->iwpbl = i40iw_get_pbl(
				    (unsigned long)req.user_wqe_buffers,
				    &ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
				    &ucontext->qp_reg_mem_list_lock, flags);

				if (!iwqp->iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	i40iw_add_devusecount(iwdev);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}
	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}

/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	u32 err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
			err = -EINVAL;
			goto exit;
		}

		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				del_timer(&iwqp->terminate_timer);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
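			/*
			 * Reset the TCP connection only if one is still
			 * live; otherwise the reset AE will never arrive,
			 * so don't wait for it.
			 */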
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	i40iw_rem_devusecount(iwdev);
	return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	info.dev = dev;
	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	info.ceqe_mask = 0;
	if (attr->comp_vector < iwdev->ceqs_count)
		info.ceq_id = attr->comp_vector;
	info.ceq_id_valid = true;
	info.ceqe_mask = 1;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
			goto cq_free_resources;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	i40iw_add_devusecount(iwdev);
	return (struct ib_cq *)iwcq;

cq_destroy:
	cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

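	/*
	 * An STag packs three fields: the low 8 bits are a random
	 * consumer key, the middle bits (under mr_stagmask) index the
	 * MR resource, and the remaining high bits form the driver key.
	 */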
	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
		i40iw_add_devusecount(iwdev);
	}
	return stag;
}

/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
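	/*
	 * Within a leaf (or with no leaf info at all) just advance to
	 * the next pble; once a level-2 leaf is exhausted, move to the
	 * first entry of the next leaf chunk.
	 */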
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pbl pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, pg_shift, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u64 pg_addr = 0;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

	pg_shift = ffs(region->page_size) - 1;
	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> pg_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			pg_addr = sg_dma_address(sg) + region->page_size * i;

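			/*
			 * With a huge-page mask, only addresses on a
			 * huge-page boundary are recorded; the 4K pages
			 * inside the same huge page are skipped.
			 */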
			if ((entry + i) == 0)
				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
			else if (!(pg_addr & ~iwmr->page_msk))
				*pbl = cpu_to_le64(pg_addr);
			else
				continue;
			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
		}
	}
}

/**
 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
 * @addr: virtual address
 * @iwmr: mr pointer for this memory registration
 */
static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
{
	struct vm_area_struct *vma;
	struct hstate *h;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma)) {
		h = hstate_vma(vma);
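		/* Only 2MB huge pages are promoted to a larger HW page size */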
		if (huge_page_size(h) == 0x200000) {
			iwmr->page_size = huge_page_size(h);
			iwmr->page_msk = huge_page_mask(h);
		}
	}
}

/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

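	/* Contiguous iff arr[pg_idx] == arr[0] + pg_idx * pg_size for all pages */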
	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}
	return true;
}

/**
 * i40iw_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == I40IW_LEVEL_1) {
		arr = (u64 *)palloc->level1.addr;
		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
		return ret;
	}

	start_addr = (u64 *)leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = (u64 *)leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}

/**
 * i40iw_setup_pbles - copy user pg address to pble's
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag to use pble's
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
			     bool use_pbles)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	u64 *pbl;
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
		mutex_unlock(&iwdev->pbl_mutex);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
		pbl = (u64 *)pinfo->addr;
	} else {
		pbl = iwmr->pgaddrmem;
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);

	if (use_pbles)
		iwmr->pgaddrmem[0] = *pbl;

	return 0;
}

/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pble
 */
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
			      struct i40iw_mem_reg_req *req,
			      struct i40iw_pbl *iwpbl,
			      bool use_pbles)
{
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_mr *iwmr = iwpbl->iwmr;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	u32 pg_size;
	int err;
	int total;
	bool ret = true;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
	pg_size = iwmr->page_size;

	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;

	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
		return -ENOMEM;
	}

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;

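	/*
	 * Userspace lays out SQ pages, then RQ pages, then CQ pages;
	 * the page at index 'total' holds the shadow area.
	 */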
	if (iwmr->type == IW_MEMREG_TYPE_QP) {
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles) {
			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
			if (ret)
				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
		}

		if (!ret) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[req->sq_pages];
		}
	} else {	/* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles)
			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

		if (!ret)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}

	if (use_pbles && ret) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
	}

	return err;
}

/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 */
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
	struct i40iw_allocate_stag_info *info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.alloc_stag.info;
	memset(info, 0, sizeof(*info));
	info->page_size = PAGE_SIZE;
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}

/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
 */
static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
				    enum ib_mr_type mr_type,
				    u32 max_num_sg)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		err_code = -EOVERFLOW;
		goto err;
	}
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	mutex_lock(&iwdev->pbl_mutex);
	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
	mutex_unlock(&iwdev->pbl_mutex);
	if (status)
		goto err1;

	if (palloc->level != I40IW_LEVEL_1)
		goto err2;
	err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err2;
	iwpbl->pbl_allocated = true;
	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err2:
	i40iw_free_pble(iwdev->pble_rsrc, palloc);
err1:
	i40iw_free_stag(iwdev, stag);
err:
	kfree(iwmr);
	return ERR_PTR(err_code);
}

/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	pbl = (u64 *)palloc->level1.addr;
	pbl[iwmr->npages++] = cpu_to_le64(addr);
	return 0;
}

/**
 * i40iw_map_mr_sg - map of sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg pages
 * @sg_offset: offset into the first sg entry
 */
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
}
1711/**
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05001712 * i40iw_drain_sq - drain the send queue
1713 * @ibqp: ib qp pointer
1714 */
1715static void i40iw_drain_sq(struct ib_qp *ibqp)
1716{
1717 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1718 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1719
1720 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1721 wait_for_completion(&iwqp->sq_drained);
1722}
1723
1724/**
1725 * i40iw_drain_rq - drain the receive queue
1726 * @ibqp: ib qp pointer
1727 */
1728static void i40iw_drain_rq(struct ib_qp *ibqp)
1729{
1730 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1731 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1732
1733 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1734 wait_for_completion(&iwqp->rq_drained);
1735}
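
/*
 * Note on the drain hooks above: the sq_drained/rq_drained completions
 * they wait on are signaled from i40iw_poll_cq() once the QP has left
 * the RTS state and its work rings are empty.  A ULP normally reaches
 * them through the core helper ib_drain_qp(qp), which invokes a
 * driver's drain_sq/drain_rq callbacks when they are provided
 * (illustrative usage, not a path taken inside this driver).
 */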

/**
 * i40iw_hwreg_mr - send cqp command for memory registration
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 * @access: access for MR
 */
static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
			  struct i40iw_mr *iwmr,
			  u16 access)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_reg_ns_stag_info *stag_info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
	memset(stag_info, 0, sizeof(*stag_info));
	stag_info->va = (void *)(unsigned long)iwpbl->user_base;
	stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	stag_info->stag_key = (u8)iwmr->stag;
	stag_info->total_len = iwmr->length;
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
	stag_info->page_size = iwmr->page_size;

	if (iwpbl->pbl_allocated) {
		if (palloc->level == I40IW_LEVEL_1) {
			stag_info->first_pm_pbl_index = palloc->level1.idx;
			stag_info->chunk_size = 1;
		} else {
			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
			stag_info->chunk_size = 3;
		}
	} else {
		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
	}

	cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
	cqp_info->post_sq = 1;
	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}
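
/*
 * In the helper above, the stag carries an index in its upper bits and
 * an 8-bit consumer key in its low byte, hence the shift by
 * I40IW_CQPSQ_STAG_IDX_SHIFT and the u8 cast.  chunk_size selects the
 * PBL layout the hardware walks: 1 for a single-level (level-1) page
 * list, 3 for a two-level tree rooted at palloc->level2.root.  An MR
 * backed by one physically contiguous region skips the PBL entirely
 * and programs the physical address directly.
 */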

/**
 * i40iw_reg_user_mr - Register a user memory region
 * @pd: ptr of pd
 * @start: virtual start address
 * @length: length of mr
 * @virt: virtual address
 * @acc: access of mr
 * @udata: user data
 */
static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
				       u64 start,
				       u64 length,
				       u64 virt,
				       int acc,
				       struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_ucontext *ucontext;
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	struct ib_umem *region;
	struct i40iw_mem_reg_req req;
	u64 pbl_depth = 0;
	u32 stag = 0;
	u16 access;
	u64 region_length;
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;
	int ret;
	int pg_shift;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);
	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(region))
		return (struct ib_mr *)region;

	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);
		return ERR_PTR(-EFAULT);
	}

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr) {
		ib_umem_release(region);
		return ERR_PTR(-ENOMEM);
	}

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	ucontext = to_ucontext(pd->uobject->context);

	iwmr->page_size = region->page_size;
	iwmr->page_msk = PAGE_MASK;

	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
		i40iw_set_hugetlb_values(start, iwmr);

	region_length = region->length + (start & (iwmr->page_size - 1));
	pg_shift = ffs(iwmr->page_size) - 1;
	pbl_depth = region_length >> pg_shift;
	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;
	palloc = &iwpbl->pble_alloc;

	iwmr->type = req.reg_type;
	iwmr->page_cnt = (u32)pbl_depth;

	switch (req.reg_type) {
	case IW_MEMREG_TYPE_QP:
		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_CQ:
		use_pbles = (req.cq_pages > 1);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		use_pbles = (iwmr->page_cnt != 1);
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		if (use_pbles) {
			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
			if (ret) {
				i40iw_free_pble(iwdev->pble_rsrc, palloc);
				iwpbl->pbl_allocated = false;
			}
		}

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {
			err = -ENOMEM;
			goto error;
		}

		iwmr->stag = stag;
		iwmr->ibmr.rkey = stag;
		iwmr->ibmr.lkey = stag;

		err = i40iw_hwreg_mr(iwdev, iwmr, access);
		if (err) {
			i40iw_free_stag(iwdev, stag);
			goto error;
		}

		break;
	default:
		goto error;
	}

	iwmr->type = req.reg_type;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;

error:
	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	ib_umem_release(region);
	kfree(iwmr);
	return ERR_PTR(err);
}
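
/*
 * i40iw_reg_user_mr() serves three distinct request types from user
 * space: IW_MEMREG_TYPE_QP and IW_MEMREG_TYPE_CQ pin the queue memory
 * the provider library allocated for a user QP/CQ (tracked on the
 * ucontext lists so create_qp/create_cq can find it later), while
 * IW_MEMREG_TYPE_MEM is an ordinary data MR.  From user space all
 * three arrive through a plain ibv_reg_mr() call; the provider library
 * encodes reg_type in the udata (an illustrative description of the
 * flow, inferred from the switch above).
 */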

/**
 * i40iw_reg_phys_mr - register kernel physical memory
 * @pd: ibpd pointer
 * @addr: physical address of memory to register
 * @size: size of memory to register
 * @acc: access rights
 * @iova_start: start of virtual address for physical buffers
 */
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
				u64 addr,
				u64 size,
				int acc,
				u64 *iova_start)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
	int ret;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	iwpbl->user_base = *iova_start;
	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		ret = -EOVERFLOW;
		goto err;
	}
	access |= i40iw_get_user_access(acc);
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->page_cnt = 1;
	iwmr->pgaddrmem[0] = addr;
	iwmr->length = size;
	status = i40iw_hwreg_mr(iwdev, iwmr, access);
	if (status) {
		i40iw_free_stag(iwdev, stag);
		ret = -ENOMEM;
		goto err;
	}

	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err:
	kfree(iwmr);
	return ERR_PTR(ret);
}

/**
 * i40iw_get_dma_mr - register physical mem
 * @pd: ptr of pd
 * @acc: access for memory
 */
static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	u64 kva = 0;

	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}
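
/*
 * The DMA MR is implemented as a degenerate physical registration:
 * address 0, length 0, IOVA 0 and a single-entry page list.  With a
 * VA-based stag and the requested access rights this gives kernel
 * consumers an lkey for their DMA-mapped buffers without building a
 * PBL (a reading of the two functions above, not separately
 * documented behavior).
 */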

/**
 * i40iw_del_memlist - delete pbl list entries for CQ/QP
 * @iwmr: iwmr for IB's user page addresses
 * @ucontext: ptr to user context
 */
static void i40iw_del_memlist(struct i40iw_mr *iwmr,
			      struct i40iw_ucontext *ucontext)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	unsigned long flags;

	switch (iwmr->type) {
	case IW_MEMREG_TYPE_CQ:
		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->cq_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_QP:
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->qp_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	default:
		break;
	}
}

/**
 * i40iw_dereg_mr - deregister mr
 * @ib_mr: mr ptr for dereg
 */
static int i40iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct ib_pd *ibpd = ib_mr->pd;
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_mr *iwmr = to_iwmr(ib_mr);
	struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
	enum i40iw_status_code status;
	struct i40iw_dealloc_stag_info *info;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	u32 stag_idx;

	if (iwmr->region)
		ib_umem_release(iwmr->region);

	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
		if (ibpd->uobject) {
			struct i40iw_ucontext *ucontext;

			ucontext = to_ucontext(ibpd->uobject->context);
			i40iw_del_memlist(iwmr, ucontext);
		}
		if (iwpbl->pbl_allocated)
			i40iw_free_pble(iwdev->pble_rsrc, palloc);
		kfree(iwmr);
		return 0;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.dealloc_stag.info;
	memset(info, 0, sizeof(*info));

	info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
	info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
	stag_idx = info->stag_idx;
	info->mr = true;
	if (iwpbl->pbl_allocated)
		info->dealloc_pbl = true;

	cqp_info->cqp_cmd = OP_DEALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
	i40iw_rem_pdusecount(iwpd, iwdev);
	i40iw_free_stag(iwdev, iwmr->stag);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	kfree(iwmr);
	return 0;
}

/**
 * i40iw_show_rev - show hardware revision
 */
static ssize_t i40iw_show_rev(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct i40iw_ib_device *iwibdev = container_of(dev,
						       struct i40iw_ib_device,
						       ibdev.dev);
	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;

	return sprintf(buf, "%x\n", hw_rev);
}

/**
 * i40iw_show_hca - show HCA type
 */
static ssize_t i40iw_show_hca(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "I40IW\n");
}

/**
 * i40iw_show_board - show board id
 */
static ssize_t i40iw_show_board(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);

static struct device_attribute *i40iw_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: copied into sg_list
 * @sgl: copy from sgl
 * @num_sges: count of sg entries
 */
static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
{
	unsigned int i;

	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
		sg_list[i].tag_off = sgl[i].addr;
		sg_list[i].len = sgl[i].length;
		sg_list[i].stag = sgl[i].lkey;
	}
}

/**
 * i40iw_post_send - kernel application wr
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 */
static int i40iw_post_send(struct ib_qp *ibqp,
			   struct ib_send_wr *ib_wr,
			   struct ib_send_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_sq_info info;
	enum i40iw_status_code ret;
	int err = 0;
	unsigned long flags;
	bool inv_stag;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		inv_stag = false;
		memset(&info, 0, sizeof(info));
		info.wr_id = (u64)(ib_wr->wr_id);
		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
			info.signaled = true;
		if (ib_wr->send_flags & IB_SEND_FENCE)
			info.read_fence = true;

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			/* fall-through */
		case IB_WR_SEND_WITH_INV:
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL;
				else
					info.op_type = I40IW_OP_TYPE_SEND;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
				else
					info.op_type = I40IW_OP_TYPE_SEND_INV;
			}

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_send.len = ib_wr->sg_list[0].length;
				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			} else {
				info.op.send.num_sges = ib_wr->num_sge;
				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_WRITE:
			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
			} else {
				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			inv_stag = true;
			/* fall-through */
		case IB_WR_RDMA_READ:
			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
				err = -EINVAL;
				break;
			}
			info.op_type = I40IW_OP_TYPE_RDMA_READ;
			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
			info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_LOCAL_INV:
			info.op_type = I40IW_OP_TYPE_INV_STAG;
			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.page_size = reg_wr(ib_wr)->mr->page_size;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;

			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		}
		default:
			err = -EINVAL;
			i40iw_pr_err("post_send bad opcode = 0x%x\n",
				     ib_wr->opcode);
			break;
		}

		if (err)
			break;
		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	else
		ukqp->ops.iw_qp_post_wr(ukqp);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}
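
/*
 * Usage sketch for the verb above (illustrative only; assumes a
 * connected kernel QP "qp", a registered buffer described by
 * dma_addr/len/lkey, and a peer address/rkey learned out of band):
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list = &sge, .num_sge = 1,
 *		},
 *		.remote_addr = raddr, .rkey = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(qp, &wr.wr, &bad_wr);
 *
 * ib_post_send() resolves to i40iw_post_send() through the device's
 * function table; on failure *bad_wr points at the first WR that was
 * not consumed.
 */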

/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int i40iw_post_recv(struct ib_qp *ibqp,
			   struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err("post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}
 out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
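
/*
 * Return-value convention in i40iw_poll_cq(): the count of work
 * completions written to "entry" (possibly 0), or -1 if the very first
 * poll attempt failed outright; completions already harvested are
 * never discarded.  This is also the only place the sq_drained /
 * rq_drained completions used by the drain verbs above are signaled.
 */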

/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
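
/*
 * Typical arm-then-poll pattern from a kernel consumer (illustrative;
 * "cq" is assumed to have been created with an event handler, and
 * process() stands in for the consumer's own completion handler):
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	...
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		process(&wc);
 *
 * Both core helpers land in i40iw_req_notify_cq()/i40iw_poll_cq()
 * above.  Note this driver arms for either solicited-only or all
 * completions; IB_CQ_REPORT_MISSED_EVENTS is not acted upon here.
 */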

/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port to return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = i40iw_query_port(ibdev, port_num, &attr);

	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

static const char * const i40iw_hw_stat_names[] = {
	/* 32-bit names */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64-bit names */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};

static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
				 size_t str_len)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, str_len, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}

/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
		I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
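
/*
 * The lifespan handed to rdma_alloc_hw_stats_struct() is in
 * milliseconds: the core sysfs stats code caches counter values and
 * only calls back into get_hw_stats() once they are older than this,
 * so the VF's 1000 ms setting bounds mailbox traffic to the PF at
 * roughly one refresh per second.
 */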

/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	unsigned long flags;

	if (dev->is_pf) {
		spin_lock_irqsave(&devstat->stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat,
						 &devstat->hw_stats);
		spin_unlock_irqrestore(&devstat->stats_lock, flags);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
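
/*
 * Layout note: stats->value[] (an array of u64) is filled with one
 * memcpy of the driver's hw-stats block, so that block has to hold
 * the 32-bit counters first and the 64-bit counters second, in the
 * same order as i40iw_hw_stat_names[] indexes them; this invariant is
 * relied on here but only partially checked by the BUILD_BUG_ON in
 * i40iw_alloc_hw_stats().
 */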

/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}

/**
 * i40iw_modify_port - modify port properties
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */
static int i40iw_modify_port(struct ib_device *ibdev,
			     u8 port,
			     int port_modify_mask,
			     struct ib_port_modify *props)
{
	return -ENOSYS;
}

/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}

/**
 * i40iw_create_ah - create address handle
 * @ibpd: ptr of pd
 * @attr: address handle attributes
 */
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
				     struct ib_ah_attr *attr)
{
	return ERR_PTR(-ENOSYS);
}

/**
 * i40iw_destroy_ah - Destroy address handle
 * @ah: pointer to address handle
 */
static int i40iw_destroy_ah(struct ib_ah *ah)
{
	return -ENOSYS;
}

/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
	if (!iwibdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return NULL;
	}
	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
	iwibdev->ibdev.owner = THIS_MODULE;
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dma_device = &pcidev->dev;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;
	iwibdev->ibdev.modify_port = i40iw_modify_port;
	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
	iwibdev->ibdev.query_gid = i40iw_query_gid;
	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
	iwibdev->ibdev.mmap = i40iw_mmap;
	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
	iwibdev->ibdev.create_qp = i40iw_create_qp;
	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
	iwibdev->ibdev.query_qp = i40iw_query_qp;
	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
	iwibdev->ibdev.create_cq = i40iw_create_cq;
	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
	iwibdev->ibdev.query_device = i40iw_query_device;
	iwibdev->ibdev.create_ah = i40iw_create_ah;
	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		i40iw_pr_err("iwcm == NULL\n");
		return NULL;
	}

	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
	iwibdev->ibdev.iwcm->connect = i40iw_connect;
	iwibdev->ibdev.iwcm->accept = i40iw_accept;
	iwibdev->ibdev.iwcm->reject = i40iw_reject;
	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iwcm->ifname));
	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
	iwibdev->ibdev.post_send = i40iw_post_send;
	iwibdev->ibdev.post_recv = i40iw_post_recv;

	return iwibdev;
}

/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}

/**
 * i40iw_unregister_rdma_device - unregister of iwarp from IB
 * @iwibdev: rdma device ptr
 */
static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
		device_remove_file(&iwibdev->ibdev.dev,
				   i40iw_dev_attributes[i]);
	ib_unregister_device(&iwibdev->ibdev);
}

/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	if (!iwibdev)
		return;

	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}

/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int i, ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;

	ret = ib_register_device(&iwibdev->ibdev, NULL);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
		ret = device_create_file(&iwibdev->ibdev.dev,
					 i40iw_dev_attributes[i]);
		if (ret) {
			while (i > 0) {
				i--;
				device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
			}
			ib_unregister_device(&iwibdev->ibdev);
			goto error;
		}
	}
	return 0;
error:
	kfree(iwdev->iwibdev->ibdev.iwcm);
	iwdev->iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}