/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props,
                              struct ib_udata *udata)
{
        struct i40iw_device *iwdev = to_iwdev(ibdev);

        if (udata->inlen || udata->outlen)
                return -EINVAL;
        memset(props, 0, sizeof(*props));
        ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
        props->fw_ver = I40IW_FW_VERSION;
        props->device_cap_flags = iwdev->device_cap_flags;
        props->vendor_id = iwdev->ldev->pcidev->vendor;
        props->vendor_part_id = iwdev->ldev->pcidev->device;
        props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
        props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
        props->max_qp = iwdev->max_qp - iwdev->used_qps;
        props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
        props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
        props->max_cq = iwdev->max_cq - iwdev->used_cqs;
        props->max_cqe = iwdev->max_cqe;
        props->max_mr = iwdev->max_mr - iwdev->used_mrs;
        props->max_pd = iwdev->max_pd - iwdev->used_pds;
        props->max_sge_rd = I40IW_MAX_SGE_RD;
        props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_map_per_fmr = 1;
        props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
        return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
                            u8 port,
                            struct ib_port_attr *props)
{
        struct i40iw_device *iwdev = to_iwdev(ibdev);
        struct net_device *netdev = iwdev->netdev;

        memset(props, 0, sizeof(*props));

        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        props->lid = 1;
        if (netif_carrier_ok(iwdev->netdev))
                props->state = IB_PORT_ACTIVE;
        else
                props->state = IB_PORT_DOWN;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = IB_WIDTH_4X;
        props->active_speed = 1;
        props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
        return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct i40iw_device *iwdev = to_iwdev(ibdev);
        struct i40iw_alloc_ucontext_req req;
        struct i40iw_alloc_ucontext_resp uresp;
        struct i40iw_ucontext *ucontext;

        if (ib_copy_from_udata(&req, udata, sizeof(req)))
                return ERR_PTR(-EINVAL);

        if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
                i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
                             req.userspace_ver, I40IW_ABI_USERSPACE_VER);
                return ERR_PTR(-EINVAL);
        }

        memset(&uresp, 0, sizeof(uresp));
        uresp.max_qps = iwdev->max_qp;
        uresp.max_pds = iwdev->max_pd;
        uresp.wq_size = iwdev->max_qp_wr * 2;
        uresp.kernel_ver = I40IW_ABI_KERNEL_VER;

        ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
        if (!ucontext)
                return ERR_PTR(-ENOMEM);

        ucontext->iwdev = iwdev;

        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                kfree(ucontext);
                return ERR_PTR(-EFAULT);
        }

        INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
        spin_lock_init(&ucontext->cq_reg_mem_list_lock);
        INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
        spin_lock_init(&ucontext->qp_reg_mem_list_lock);

        return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct i40iw_ucontext *ucontext = to_ucontext(context);
        unsigned long flags;

        spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
        if (!list_empty(&ucontext->cq_reg_mem_list)) {
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                return -EBUSY;
        }
        spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
        spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
        if (!list_empty(&ucontext->qp_reg_mem_list)) {
                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                return -EBUSY;
        }
        spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

        kfree(ucontext);
        return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct i40iw_ucontext *ucontext;
        u64 db_addr_offset;
        u64 push_offset;

        ucontext = to_ucontext(context);
        if (ucontext->iwdev->sc_dev.is_pf) {
                db_addr_offset = I40IW_DB_ADDR_OFFSET;
                push_offset = I40IW_PUSH_OFFSET;
                if (vma->vm_pgoff)
                        vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
        } else {
                db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
                push_offset = I40IW_VF_PUSH_OFFSET;
                if (vma->vm_pgoff)
                        vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
        }

        vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

        if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                vma->vm_private_data = ucontext;
        } else {
                if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                else
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }

        if (io_remap_pfn_range(vma, vma->vm_start,
                               vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
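
/*
 * Illustrative sketch, not driver code: how a user-space consumer might
 * exercise the offset handling above. A page offset of 0 selects the
 * doorbell page (mapped non-cached); non-zero offsets select push pages,
 * which alternate write-combined/non-cached per the modulo test above.
 * The file descriptor name below is hypothetical.
 *
 *      // doorbell page: vm_pgoff == 0
 *      db = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, dev_fd, 0);
 *      // first push page: vm_pgoff == 1 (offset = 1 * page_size)
 *      push = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, dev_fd, 4096);
 */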

/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        enum i40iw_status_code status;

        if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
                return;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return;

        atomic_inc(&cqp_request->refcount);

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
        cqp_info->post_sq = 1;

        cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
        cqp_info->in.u.manage_push_page.info.free_page = 0;
        cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
        cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (!status)
                qp->push_idx = cqp_request->compl_info.op_ret_val;
        else
                i40iw_pr_err("CQP-OP Push page fail");
        i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        enum i40iw_status_code status;

        if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
                return;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
        cqp_info->post_sq = 1;

        cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
        cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
        cqp_info->in.u.manage_push_page.info.free_page = 1;
        cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
        cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (!status)
                qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
        else
                i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct i40iw_pd *iwpd;
        struct i40iw_device *iwdev = to_iwdev(ibdev);
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_alloc_pd_resp uresp;
        struct i40iw_sc_pd *sc_pd;
        u32 pd_id = 0;
        int err;

        err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
                                   iwdev->max_pd, &pd_id, &iwdev->next_pd);
        if (err) {
                i40iw_pr_err("alloc resource failed\n");
                return ERR_PTR(err);
        }

        iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
        if (!iwpd) {
                err = -ENOMEM;
                goto free_res;
        }

        sc_pd = &iwpd->sc_pd;
        dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);

        if (context) {
                memset(&uresp, 0, sizeof(uresp));
                uresp.pd_id = pd_id;
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        err = -EFAULT;
                        goto error;
                }
        }

        i40iw_add_pdusecount(iwpd);
        return &iwpd->ibpd;
error:
        kfree(iwpd);
free_res:
        i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
        return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
        struct i40iw_pd *iwpd = to_iwpd(ibpd);
        struct i40iw_device *iwdev = to_iwdev(ibpd->device);

        i40iw_rem_pdusecount(iwpd, iwdev);
        return 0;
}

/**
 * i40iw_qp_roundup - return rounded up qp ring size
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
        int scount = 1;

        if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
                wr_ring_size = I40IWQP_SW_MIN_WQSIZE;

        for (wr_ring_size--; scount <= 16; scount *= 2)
                wr_ring_size |= wr_ring_size >> scount;
        return ++wr_ring_size;
}
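
/*
 * Worked example of the round-up above (a standard next-power-of-two
 * computation via bit smearing): for wr_ring_size = 100, decrementing
 * gives 99 (0b1100011); OR-ing in right shifts by 1, 2, 4, 8 and 16
 * sets every bit below the highest set bit, yielding 127 (0b1111111);
 * incrementing gives 128. A value that is already a power of two is
 * returned unchanged (128 -> 127 -> 127 -> 128).
 */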

/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
                                       struct list_head *pbl_list)
{
        struct i40iw_pbl *iwpbl;

        list_for_each_entry(iwpbl, pbl_list, list) {
                if (iwpbl->user_base == va) {
                        list_del(&iwpbl->list);
                        return iwpbl;
                }
        }
        return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
                             struct i40iw_qp *iwqp,
                             u32 qp_num)
{
        i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
        if (qp_num)
                i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
        kfree(iwqp->kqp.wrid_mem);
        iwqp->kqp.wrid_mem = NULL;
        kfree(iwqp->allocated_buffer);
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
        struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

        ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
        struct i40iw_qp *iwqp = to_iwqp(ibqp);

        iwqp->destroyed = 1;

        if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
                i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
                        i40iw_clean_cqes(iwqp, iwqp->iwscq);
                        if (iwqp->iwrcq != iwqp->iwscq)
                                i40iw_clean_cqes(iwqp, iwqp->iwrcq);
                }
        }

        i40iw_rem_ref(&iwqp->ibqp);
        return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
                               struct i40iw_qp *iwqp,
                               struct i40iw_qp_init_info *init_info)
{
        struct i40iw_pbl *iwpbl = iwqp->iwpbl;
        struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

        iwqp->page = qpmr->sq_page;
        init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
        if (iwpbl->pbl_allocated) {
                init_info->virtual_map = true;
                init_info->sq_pa = qpmr->sq_pbl.idx;
                init_info->rq_pa = qpmr->rq_pbl.idx;
        } else {
                init_info->sq_pa = qpmr->sq_pbl.addr;
                init_info->rq_pa = qpmr->rq_pbl.addr;
        }
        return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
                                struct i40iw_qp *iwqp,
                                struct i40iw_qp_init_info *info)
{
        struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
        u32 sqdepth, rqdepth;
        u32 sq_size, rq_size;
        u8 sqshift, rqshift;
        u32 size;
        enum i40iw_status_code status;
        struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

        sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
        rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

        status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
        if (!status)
                status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);

        if (status)
                return -ENOMEM;

        sqdepth = sq_size << sqshift;
        rqdepth = rq_size << rqshift;

        size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
        iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

        ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
        if (!ukinfo->sq_wrtrk_array)
                return -ENOMEM;

        ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

        size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
        size += (I40IW_SHADOW_AREA_SIZE << 3);

        status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
        if (status) {
                kfree(ukinfo->sq_wrtrk_array);
                ukinfo->sq_wrtrk_array = NULL;
                return -ENOMEM;
        }

        ukinfo->sq = mem->va;
        info->sq_pa = mem->pa;

        ukinfo->rq = &ukinfo->sq[sqdepth];
        info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

        ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
        info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

        ukinfo->sq_size = sq_size;
        ukinfo->rq_size = rq_size;
        ukinfo->qp_id = iwqp->ibqp.qp_num;
        return 0;
}
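
/*
 * Sketch of the single DMA allocation laid out above (illustrative only):
 *
 *      mem->va
 *      +-----------------+  sqdepth * I40IW_QP_WQE_MIN_SIZE
 *      | SQ WQEs         |
 *      +-----------------+  rqdepth * I40IW_QP_WQE_MIN_SIZE
 *      | RQ WQEs         |
 *      +-----------------+  I40IW_SHADOW_AREA_SIZE << 3
 *      | shadow area     |
 *      +-----------------+
 *
 * sqdepth/rqdepth are the ring sizes scaled by the WQE shift, e.g. with
 * sq_size = 128 and sqshift = 1 (two minimum-size quanta per WQE),
 * sqdepth is 256 minimum-size WQE slots.
 */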

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct i40iw_pd *iwpd = to_iwpd(ibpd);
        struct i40iw_device *iwdev = to_iwdev(ibpd->device);
        struct i40iw_cqp *iwcqp = &iwdev->cqp;
        struct i40iw_qp *iwqp;
        struct i40iw_ucontext *ucontext;
        struct i40iw_create_qp_req req;
        struct i40iw_create_qp_resp uresp;
        u32 qp_num = 0;
        void *mem;
        enum i40iw_status_code ret;
        int err_code;
        int sq_size;
        int rq_size;
        struct i40iw_sc_qp *qp;
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_qp_init_info init_info;
        struct i40iw_create_qp_info *qp_info;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_qp_host_ctx_info *ctx_info;
        struct i40iwarp_offload_info *iwarp_info;
        unsigned long flags;

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);
        if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
                init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

        if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
                init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

        if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
                init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

        memset(&init_info, 0, sizeof(init_info));

        sq_size = init_attr->cap.max_send_wr;
        rq_size = init_attr->cap.max_recv_wr;

        init_info.qp_uk_init_info.sq_size = sq_size;
        init_info.qp_uk_init_info.rq_size = rq_size;
        init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
        init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
        init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

        mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        iwqp = (struct i40iw_qp *)mem;
        qp = &iwqp->sc_qp;
        qp->back_qp = (void *)iwqp;
        qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

        iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

        if (i40iw_allocate_dma_mem(dev->hw,
                                   &iwqp->q2_ctx_mem,
                                   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
                                   256)) {
                i40iw_pr_err("dma_mem failed\n");
                err_code = -ENOMEM;
                goto error;
        }

        init_info.q2 = iwqp->q2_ctx_mem.va;
        init_info.q2_pa = iwqp->q2_ctx_mem.pa;

        init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
        init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

        err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
                                        &qp_num, &iwdev->next_qp);
        if (err_code) {
                i40iw_pr_err("qp resource\n");
                goto error;
        }

        iwqp->allocated_buffer = mem;
        iwqp->iwdev = iwdev;
        iwqp->iwpd = iwpd;
        iwqp->ibqp.qp_num = qp_num;
        qp = &iwqp->sc_qp;
        iwqp->iwscq = to_iwcq(init_attr->send_cq);
        iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

        iwqp->host_ctx.va = init_info.host_ctx;
        iwqp->host_ctx.pa = init_info.host_ctx_pa;
        iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

        init_info.pd = &iwpd->sc_pd;
        init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
        iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

        if (init_attr->qp_type != IB_QPT_RC) {
                err_code = -EINVAL;
                goto error;
        }
        if (iwdev->push_mode)
                i40iw_alloc_push_page(iwdev, qp);
        if (udata) {
                err_code = ib_copy_from_udata(&req, udata, sizeof(req));
                if (err_code) {
                        i40iw_pr_err("ib_copy_from_udata fail\n");
                        goto error;
                }
                iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
                if (ibpd->uobject && ibpd->uobject->context) {
                        iwqp->user_mode = 1;
                        ucontext = to_ucontext(ibpd->uobject->context);

                        if (req.user_wqe_buffers) {
                                spin_lock_irqsave(
                                        &ucontext->qp_reg_mem_list_lock, flags);
                                iwqp->iwpbl = i40iw_get_pbl(
                                        (unsigned long)req.user_wqe_buffers,
                                        &ucontext->qp_reg_mem_list);
                                spin_unlock_irqrestore(
                                        &ucontext->qp_reg_mem_list_lock, flags);

                                if (!iwqp->iwpbl) {
                                        err_code = -ENODATA;
                                        i40iw_pr_err("no pbl info\n");
                                        goto error;
                                }
                        }
                }
                err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
        } else {
                err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
        }

        if (err_code) {
                i40iw_pr_err("setup qp failed\n");
                goto error;
        }

        init_info.type = I40IW_QP_TYPE_IWARP;
        ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
        if (ret) {
                err_code = -EPROTO;
                i40iw_pr_err("qp_init fail\n");
                goto error;
        }
        ctx_info = &iwqp->ctx_info;
        iwarp_info = &iwqp->iwarp_info;
        iwarp_info->rd_enable = true;
        iwarp_info->wr_rdresp_en = true;
        if (!iwqp->user_mode) {
                iwarp_info->fast_reg_en = true;
                iwarp_info->priv_mode_en = true;
        }
        iwarp_info->ddp_ver = 1;
        iwarp_info->rdmap_ver = 1;

        ctx_info->iwarp_info_valid = true;
        ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
        ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
        if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
                ctx_info->push_mode_en = false;
        } else {
                ctx_info->push_mode_en = true;
                ctx_info->push_idx = qp->push_idx;
        }

        ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
                                             (u64 *)iwqp->host_ctx.va,
                                             ctx_info);
        ctx_info->iwarp_info_valid = false;
        cqp_request = i40iw_get_cqp_request(iwcqp, true);
        if (!cqp_request) {
                err_code = -ENOMEM;
                goto error;
        }
        cqp_info = &cqp_request->info;
        qp_info = &cqp_request->info.in.u.qp_create.info;

        memset(qp_info, 0, sizeof(*qp_info));

        qp_info->cq_num_valid = true;
        qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

        cqp_info->cqp_cmd = OP_QP_CREATE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.qp_create.qp = qp;
        cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
        ret = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (ret) {
                i40iw_pr_err("CQP-OP QP create fail");
                err_code = -EACCES;
                goto error;
        }

        i40iw_add_ref(&iwqp->ibqp);
        spin_lock_init(&iwqp->lock);
        iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
        iwdev->qp_table[qp_num] = iwqp;
        i40iw_add_pdusecount(iwqp->iwpd);
        if (ibpd->uobject && udata) {
                memset(&uresp, 0, sizeof(uresp));
                uresp.actual_sq_size = sq_size;
                uresp.actual_rq_size = rq_size;
                uresp.qp_id = qp_num;
                uresp.push_idx = qp->push_idx;
                err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (err_code) {
                        i40iw_pr_err("copy_to_udata failed\n");
                        i40iw_destroy_qp(&iwqp->ibqp);
                        /* let the completion of the qp destroy free the qp */
                        return ERR_PTR(err_code);
                }
        }
        init_completion(&iwqp->sq_drained);
        init_completion(&iwqp->rq_drained);

        return &iwqp->ibqp;
error:
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
        return ERR_PTR(err_code);
}

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
                          struct ib_qp_attr *attr,
                          int attr_mask,
                          struct ib_qp_init_attr *init_attr)
{
        struct i40iw_qp *iwqp = to_iwqp(ibqp);
        struct i40iw_sc_qp *qp = &iwqp->sc_qp;

        attr->qp_access_flags = 0;
        attr->cap.max_send_wr = qp->qp_uk.sq_size;
        attr->cap.max_recv_wr = qp->qp_uk.rq_size;
        attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
        attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
        attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
        init_attr->event_handler = iwqp->ibqp.event_handler;
        init_attr->qp_context = iwqp->ibqp.qp_context;
        init_attr->send_cq = iwqp->ibqp.send_cq;
        init_attr->recv_cq = iwqp->ibqp.recv_cq;
        init_attr->srq = iwqp->ibqp.srq;
        init_attr->cap = attr->cap;
        return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
                        struct i40iw_modify_qp_info *info, bool wait)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_modify_qp_info *m_info;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;
        m_info = &cqp_info->in.u.qp_modify.info;
        memcpy(m_info, info, sizeof(*m_info));
        cqp_info->cqp_cmd = OP_QP_MODIFY;
        cqp_info->post_sq = 1;
        cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
        cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Modify QP fail");
}

/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct i40iw_qp *iwqp = to_iwqp(ibqp);
        struct i40iw_device *iwdev = iwqp->iwdev;
        struct i40iw_qp_host_ctx_info *ctx_info;
        struct i40iwarp_offload_info *iwarp_info;
        struct i40iw_modify_qp_info info;
        u8 issue_modify_qp = 0;
        u8 dont_wait = 0;
        u32 err;
        unsigned long flags;

        memset(&info, 0, sizeof(info));
        ctx_info = &iwqp->ctx_info;
        iwarp_info = &iwqp->iwarp_info;

        spin_lock_irqsave(&iwqp->lock, flags);

        if (attr_mask & IB_QP_STATE) {
                switch (attr->qp_state) {
                case IB_QPS_INIT:
                case IB_QPS_RTR:
                        if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
                                err = -EINVAL;
                                goto exit;
                        }
                        if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
                                info.next_iwarp_state = I40IW_QP_STATE_IDLE;
                                issue_modify_qp = 1;
                        }
                        break;
                case IB_QPS_RTS:
                        if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
                            (!iwqp->cm_id)) {
                                err = -EINVAL;
                                goto exit;
                        }

                        issue_modify_qp = 1;
                        iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
                        iwqp->hte_added = 1;
                        info.next_iwarp_state = I40IW_QP_STATE_RTS;
                        info.tcp_ctx_valid = true;
                        info.ord_valid = true;
                        info.arp_cache_idx_valid = true;
                        info.cq_num_valid = true;
                        break;
                case IB_QPS_SQD:
                        if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
                                err = 0;
                                goto exit;
                        }
                        if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
                            (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
                                err = 0;
                                goto exit;
                        }
                        if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
                                err = -EINVAL;
                                goto exit;
                        }
                        info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
                        issue_modify_qp = 1;
                        break;
                case IB_QPS_SQE:
                        if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
                                err = -EINVAL;
                                goto exit;
                        }
                        info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
                        issue_modify_qp = 1;
                        break;
                case IB_QPS_ERR:
                case IB_QPS_RESET:
                        if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
                                err = -EINVAL;
                                goto exit;
                        }
                        if (iwqp->sc_qp.term_flags)
                                del_timer(&iwqp->terminate_timer);
                        info.next_iwarp_state = I40IW_QP_STATE_ERROR;
                        if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
                            iwdev->iw_status &&
                            (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
                                info.reset_tcp_conn = true;
                        else
                                dont_wait = 1;
                        issue_modify_qp = 1;
                        info.next_iwarp_state = I40IW_QP_STATE_ERROR;
                        break;
                default:
                        err = -EINVAL;
                        goto exit;
                }

                iwqp->ibqp_state = attr->qp_state;

                if (issue_modify_qp)
                        iwqp->iwarp_state = info.next_iwarp_state;
                else
                        info.next_iwarp_state = iwqp->iwarp_state;
        }
        if (attr_mask & IB_QP_ACCESS_FLAGS) {
                ctx_info->iwarp_info_valid = true;
                if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
                        iwarp_info->wr_rdresp_en = true;
                if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
                        iwarp_info->wr_rdresp_en = true;
                if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
                        iwarp_info->rd_enable = true;
                if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
                        iwarp_info->bind_en = true;

                if (iwqp->user_mode) {
                        iwarp_info->rd_enable = true;
                        iwarp_info->wr_rdresp_en = true;
                        iwarp_info->priv_mode_en = false;
                }
        }

        if (ctx_info->iwarp_info_valid) {
                struct i40iw_sc_dev *dev = &iwdev->sc_dev;
                int ret;

                ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
                ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
                ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
                                                     (u64 *)iwqp->host_ctx.va,
                                                     ctx_info);
                if (ret) {
                        i40iw_pr_err("setting QP context\n");
                        err = -EINVAL;
                        goto exit;
                }
        }

        spin_unlock_irqrestore(&iwqp->lock, flags);

        if (issue_modify_qp)
                i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

        if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
                if (dont_wait) {
                        if (iwqp->cm_id && iwqp->hw_tcp_state) {
                                spin_lock_irqsave(&iwqp->lock, flags);
                                iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
                                iwqp->last_aeq = I40IW_AE_RESET_SENT;
                                spin_unlock_irqrestore(&iwqp->lock, flags);
                        }
                }
        }
        return 0;
exit:
        spin_unlock_irqrestore(&iwqp->lock, flags);
        return err;
}
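
/*
 * Summary of the IB -> iWARP state mapping implemented above (derived
 * directly from the switch statement; illustrative reference only):
 *
 *      IB_QPS_INIT / IB_QPS_RTR  -> I40IW_QP_STATE_IDLE
 *      IB_QPS_RTS                -> I40IW_QP_STATE_RTS
 *      IB_QPS_SQD                -> I40IW_QP_STATE_CLOSING
 *      IB_QPS_SQE                -> I40IW_QP_STATE_TERMINATE
 *      IB_QPS_ERR / IB_QPS_RESET -> I40IW_QP_STATE_ERROR
 */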

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
        struct i40iw_sc_cq *cq = &iwcq->sc_cq;

        if (!iwcq->user_mode)
                i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
        i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;

        cqp_info->cqp_cmd = OP_CQ_DESTROY;
        cqp_info->post_sq = 1;
        cqp_info->in.u.cq_destroy.cq = cq;
        cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct i40iw_cq *iwcq;
        struct i40iw_device *iwdev;
        struct i40iw_sc_cq *cq;

        if (!ib_cq) {
                i40iw_pr_err("ib_cq == NULL\n");
                return 0;
        }

        iwcq = to_iwcq(ib_cq);
        iwdev = to_iwdev(ib_cq->device);
        cq = &iwcq->sc_cq;
        cq_wq_destroy(iwdev, cq);
        cq_free_resources(iwdev, iwcq);
        kfree(iwcq);
        return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
                                     const struct ib_cq_init_attr *attr,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct i40iw_device *iwdev = to_iwdev(ibdev);
        struct i40iw_cq *iwcq;
        struct i40iw_pbl *iwpbl;
        u32 cq_num = 0;
        struct i40iw_sc_cq *cq;
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_cq_init_info info;
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
        unsigned long flags;
        int err_code;
        int entries = attr->cqe;

        if (entries > iwdev->max_cqe)
                return ERR_PTR(-EINVAL);

        iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
        if (!iwcq)
                return ERR_PTR(-ENOMEM);

        memset(&info, 0, sizeof(info));

        err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
                                        iwdev->max_cq, &cq_num,
                                        &iwdev->next_cq);
        if (err_code)
                goto error;

        cq = &iwcq->sc_cq;
        cq->back_cq = (void *)iwcq;
        spin_lock_init(&iwcq->lock);

        info.dev = dev;
        ukinfo->cq_size = max(entries, 4);
        ukinfo->cq_id = cq_num;
        iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
        info.ceqe_mask = 0;
        if (attr->comp_vector < iwdev->ceqs_count)
                info.ceq_id = attr->comp_vector;
        info.ceq_id_valid = true;
        info.ceqe_mask = 1;
        info.type = I40IW_CQ_TYPE_IWARP;
        if (context) {
                struct i40iw_ucontext *ucontext;
                struct i40iw_create_cq_req req;
                struct i40iw_cq_mr *cqmr;

                memset(&req, 0, sizeof(req));
                iwcq->user_mode = true;
                ucontext = to_ucontext(context);
                if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
                        err_code = -EFAULT;
                        goto cq_free_resources;
                }

                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
                iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
                                      &ucontext->cq_reg_mem_list);
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                if (!iwpbl) {
                        err_code = -EPROTO;
                        goto cq_free_resources;
                }

                iwcq->iwpbl = iwpbl;
                iwcq->cq_mem_size = 0;
                cqmr = &iwpbl->cq_mr;
                info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
                if (iwpbl->pbl_allocated) {
                        info.virtual_map = true;
                        info.pbl_chunk_size = 1;
                        info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
                } else {
                        info.cq_base_pa = cqmr->cq_pbl.addr;
                }
        } else {
                /* Kmode allocations */
                int rsize;
                int shadow;

                rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
                rsize = round_up(rsize, 256);
                shadow = I40IW_SHADOW_AREA_SIZE << 3;
                status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
                                                rsize + shadow, 256);
                if (status) {
                        err_code = -ENOMEM;
                        goto cq_free_resources;
                }
                ukinfo->cq_base = iwcq->kmem.va;
                info.cq_base_pa = iwcq->kmem.pa;
                info.shadow_area_pa = info.cq_base_pa + rsize;
                ukinfo->shadow_area = iwcq->kmem.va + rsize;
        }

        if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
                i40iw_pr_err("init cq fail\n");
                err_code = -EPROTO;
                goto cq_free_resources;
        }

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request) {
                err_code = -ENOMEM;
                goto cq_free_resources;
        }

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = OP_CQ_CREATE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.cq_create.cq = cq;
        cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status) {
                i40iw_pr_err("CQP-OP Create CQ fail");
                err_code = -EPROTO;
                goto cq_free_resources;
        }

        if (context) {
                struct i40iw_create_cq_resp resp;

                memset(&resp, 0, sizeof(resp));
                resp.cq_id = info.cq_uk_init_info.cq_id;
                resp.cq_size = info.cq_uk_init_info.cq_size;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        i40iw_pr_err("copy to user data\n");
                        err_code = -EPROTO;
                        goto cq_destroy;
                }
        }

        return (struct ib_cq *)iwcq;

cq_destroy:
        cq_wq_destroy(iwdev, cq);
cq_free_resources:
        cq_free_resources(iwdev, iwcq);
error:
        kfree(iwcq);
        return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
        u16 access = 0;

        access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
        access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
        access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
        access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
        return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
        u32 stag_idx;

        stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
}

/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
        u32 stag = 0;
        u32 stag_index = 0;
        u32 next_stag_index;
        u32 driver_key;
        u32 random;
        u8 consumer_key;
        int ret;

        get_random_bytes(&random, sizeof(random));
        consumer_key = (u8)random;

        driver_key = random & ~iwdev->mr_stagmask;
        next_stag_index = (random & iwdev->mr_stagmask) >> 8;
        next_stag_index %= iwdev->max_mr;

        ret = i40iw_alloc_resource(iwdev,
                                   iwdev->allocated_mrs, iwdev->max_mr,
                                   &stag_index, &next_stag_index);
        if (!ret) {
                stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
                stag |= driver_key;
                stag += (u32)consumer_key;
        }
        return stag;
}
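
/*
 * Illustrative composition of a stag (values hypothetical; the >> 8 used
 * for next_stag_index above suggests I40IW_CQPSQ_STAG_IDX_SHIFT is 8,
 * which is treated as an assumption here): with stag_index = 0x12 the
 * index occupies the bits above the shift, the driver key fills the bits
 * masked off by ~mr_stagmask, and the low byte is the random consumer
 * key:
 *
 *      stag  = (0x12 << 8) | driver_key;   // index + driver key bits
 *      stag += (u32)consumer_key;          // low 8 bits
 *
 * i40iw_free_stag() recovers the index by masking with mr_stagmask and
 * shifting right by the same amount.
 */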

/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
                                       struct i40iw_pble_info **pinfo,
                                       u32 *idx)
{
        *idx += 1;
        if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
                return ++pbl;
        *idx = 0;
        (*pinfo)++;
        return (u64 *)(*pinfo)->addr;
}
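
/*
 * Illustrative walk, not driver code: each pble leaf holds (*pinfo)->cnt
 * contiguous u64 entries, and the helper advances within the current
 * leaf until idx reaches cnt, then hops to the first entry of the next
 * leaf. With two leaves of cnt = 512, entry 511 is the last slot of
 * leaf 0 and the next call resolves to leaf1->addr[0]. A NULL *pinfo
 * (level-1 allocation) simply walks a flat array.
 */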

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses into the local pbles
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pbl pointer to save level 1 or level 0 pble
 * @level: indicates level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
                                    u64 *pbl,
                                    enum i40iw_pble_level level)
{
        struct ib_umem *region = iwmr->region;
        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
        int chunk_pages, entry, pg_shift, i;
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct i40iw_pble_info *pinfo;
        struct scatterlist *sg;
        u64 pg_addr = 0;
        u32 idx = 0;

        pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

        pg_shift = ffs(region->page_size) - 1;
        for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
                chunk_pages = sg_dma_len(sg) >> pg_shift;
                if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
                    !iwpbl->qp_mr.sq_page)
                        iwpbl->qp_mr.sq_page = sg_page(sg);
                for (i = 0; i < chunk_pages; i++) {
                        pg_addr = sg_dma_address(sg) + region->page_size * i;

                        if ((entry + i) == 0)
                                *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
                        else if (!(pg_addr & ~iwmr->page_msk))
                                *pbl = cpu_to_le64(pg_addr);
                        else
                                continue;
                        pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
                }
        }
}
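
/*
 * Example of the masking above (illustrative): with a 2MB huge page,
 * iwmr->page_msk is ~0x1fffff. The very first entry stores the masked
 * base address; after that, an address is written only when it is
 * huge-page aligned, i.e. !(pg_addr & ~page_msk), and all intermediate
 * 4KB chunks within the same 2MB page hit the "continue" branch. The
 * result is one pble entry per MR page size, regardless of the 4KB
 * granularity of the scatterlist.
 */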

/**
 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values
 * @addr: virtual address
 * @iwmr: mr pointer for this memory registration
 */
static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
{
        struct vm_area_struct *vma;
        struct hstate *h;

        vma = find_vma(current->mm, addr);
        if (vma && is_vm_hugetlb_page(vma)) {
                h = hstate_vma(vma);
                if (huge_page_size(h) == 0x200000) {
                        iwmr->page_size = huge_page_size(h);
                        iwmr->page_msk = huge_page_mask(h);
                }
        }
}

/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
        u32 pg_idx;

        for (pg_idx = 0; pg_idx < npages; pg_idx++) {
                if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
                        return false;
        }
        return true;
}
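
/*
 * Contiguity check example (illustrative): arr[0] is the base dma
 * address; the region is contiguous iff arr[i] == arr[0] + i * pg_size
 * for every i. E.g. with pg_size = 4096:
 *
 *      { 0x10000, 0x11000, 0x12000 }  -> true
 *      { 0x10000, 0x11000, 0x13000 }  -> false (gap at index 2)
 */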

/**
 * i40iw_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *leaf = lvl2->leaf;
        u64 *arr = NULL;
        u64 *start_addr = NULL;
        int i;
        bool ret;

        if (palloc->level == I40IW_LEVEL_1) {
                arr = (u64 *)palloc->level1.addr;
                ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
                return ret;
        }

        start_addr = (u64 *)leaf->addr;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                arr = (u64 *)leaf->addr;
                if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
                        return false;
                ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
                if (!ret)
                        return false;
        }

        return true;
}

/**
 * i40iw_setup_pbles - copy user pg addresses to pbles
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag indicating whether to use pbles
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
                             struct i40iw_mr *iwmr,
                             bool use_pbles)
{
        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct i40iw_pble_info *pinfo;
        u64 *pbl;
        enum i40iw_status_code status;
        enum i40iw_pble_level level = I40IW_LEVEL_1;

        if (use_pbles) {
                mutex_lock(&iwdev->pbl_mutex);
                status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
                mutex_unlock(&iwdev->pbl_mutex);
                if (status)
                        return -ENOMEM;

                iwpbl->pbl_allocated = true;
                level = palloc->level;
                pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
                pbl = (u64 *)pinfo->addr;
        } else {
                pbl = iwmr->pgaddrmem;
        }

        i40iw_copy_user_pgaddrs(iwmr, pbl, level);

        if (use_pbles)
                iwmr->pgaddrmem[0] = *pbl;

        return 0;
}

/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pbles
 */
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
                              struct i40iw_mem_reg_req *req,
                              struct i40iw_pbl *iwpbl,
                              bool use_pbles)
{
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct i40iw_mr *iwmr = iwpbl->iwmr;
        struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
        struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
        struct i40iw_hmc_pble *hmc_p;
        u64 *arr = iwmr->pgaddrmem;
        u32 pg_size;
        int err;
        int total;
        bool ret = true;

        total = req->sq_pages + req->rq_pages + req->cq_pages;
        pg_size = iwmr->page_size;

        err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
        if (err)
                return err;

        if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
                i40iw_free_pble(iwdev->pble_rsrc, palloc);
                iwpbl->pbl_allocated = false;
                return -ENOMEM;
        }

        if (use_pbles)
                arr = (u64 *)palloc->level1.addr;

        if (iwmr->type == IW_MEMREG_TYPE_QP) {
                hmc_p = &qpmr->sq_pbl;
                qpmr->shadow = (dma_addr_t)arr[total];

                if (use_pbles) {
                        ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
                        if (ret)
                                ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
                }

                if (!ret) {
                        hmc_p->idx = palloc->level1.idx;
                        hmc_p = &qpmr->rq_pbl;
                        hmc_p->idx = palloc->level1.idx + req->sq_pages;
                } else {
                        hmc_p->addr = arr[0];
                        hmc_p = &qpmr->rq_pbl;
                        hmc_p->addr = arr[req->sq_pages];
                }
        } else {                /* CQ */
                hmc_p = &cqmr->cq_pbl;
                cqmr->shadow = (dma_addr_t)arr[total];

                if (use_pbles)
                        ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

                if (!ret)
                        hmc_p->idx = palloc->level1.idx;
                else
                        hmc_p->addr = arr[0];
        }

        if (use_pbles && ret) {
                i40iw_free_pble(iwdev->pble_rsrc, palloc);
                iwpbl->pbl_allocated = false;
        }

        return err;
}
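
/*
 * Layout of the user-supplied queue memory handled above (sketch): for
 * a QP registration, arr[] holds sq_pages + rq_pages page addresses
 * followed by one shadow-area page, so qpmr->shadow = arr[total]; for a
 * CQ it is cq_pages followed by the shadow page. When the pages are
 * physically contiguous the base address is programmed directly
 * (hmc_p->addr) and the pble allocation is released; otherwise the
 * level-1 pble index is handed to hardware instead.
 */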

/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 */
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
        struct i40iw_allocate_stag_info *info;
        struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
        enum i40iw_status_code status;
        int err = 0;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return -ENOMEM;

        cqp_info = &cqp_request->info;
        info = &cqp_info->in.u.alloc_stag.info;
        memset(info, 0, sizeof(*info));
        info->page_size = PAGE_SIZE;
        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        info->pd_id = iwpd->sc_pd.pd_id;
        info->total_len = iwmr->length;
        info->remote_access = true;
        cqp_info->cqp_cmd = OP_ALLOC_STAG;
        cqp_info->post_sq = 1;
        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
        cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status) {
                err = -ENOMEM;
                i40iw_pr_err("CQP-OP MR Reg fail");
        }
        return err;
}

/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory type for stag registration
 * @max_num_sg: max number of pages
 */
static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
                                    enum ib_mr_type mr_type,
                                    u32 max_num_sg)
{
        struct i40iw_pd *iwpd = to_iwpd(pd);
        struct i40iw_device *iwdev = to_iwdev(pd->device);
        struct i40iw_pble_alloc *palloc;
        struct i40iw_pbl *iwpbl;
        struct i40iw_mr *iwmr;
        enum i40iw_status_code status;
        u32 stag;
        int err_code = -ENOMEM;

        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
        if (!iwmr)
                return ERR_PTR(-ENOMEM);

        stag = i40iw_create_stag(iwdev);
        if (!stag) {
                err_code = -EOVERFLOW;
                goto err;
        }
        iwmr->stag = stag;
        iwmr->ibmr.rkey = stag;
        iwmr->ibmr.lkey = stag;
        iwmr->ibmr.pd = pd;
        iwmr->ibmr.device = pd->device;
        iwpbl = &iwmr->iwpbl;
        iwpbl->iwmr = iwmr;
        iwmr->type = IW_MEMREG_TYPE_MEM;
        palloc = &iwpbl->pble_alloc;
        iwmr->page_cnt = max_num_sg;
        mutex_lock(&iwdev->pbl_mutex);
        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
        mutex_unlock(&iwdev->pbl_mutex);
        if (status)
                goto err1;

        if (palloc->level != I40IW_LEVEL_1)
                goto err2;
        err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
        if (err_code)
                goto err2;
        iwpbl->pbl_allocated = true;
        i40iw_add_pdusecount(iwpd);
        return &iwmr->ibmr;
err2:
        i40iw_free_pble(iwdev->pble_rsrc, palloc);
err1:
        i40iw_free_stag(iwdev, stag);
err:
        kfree(iwmr);
        return ERR_PTR(err_code);
}

/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct i40iw_mr *iwmr = to_iwmr(ibmr);
        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        u64 *pbl;

        if (unlikely(iwmr->npages == iwmr->page_cnt))
                return -ENOMEM;

        pbl = (u64 *)palloc->level1.addr;
        pbl[iwmr->npages++] = cpu_to_le64(addr);
        return 0;
}

/**
 * i40iw_map_mr_sg - map of sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg pages
 * @sg_offset: offset into the first sg entry
 */
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                           int sg_nents, unsigned int *sg_offset)
{
        struct i40iw_mr *iwmr = to_iwmr(ibmr);

        iwmr->npages = 0;
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
}
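
/*
 * Typical kernel-side fast-registration flow built on the two verbs
 * above (a minimal sketch using the standard ib_core API; error
 * handling and the pd/sgl setup are assumed):
 *
 *      struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *      int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *      // n pages were collected via i40iw_set_page(); posting a
 *      // subsequent IB_WR_REG_MR work request makes the mapping live.
 */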
1691
1692/**
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05001693 * i40iw_drain_sq - drain the send queue
1694 * @ibqp: ib qp pointer
1695 */
1696static void i40iw_drain_sq(struct ib_qp *ibqp)
1697{
1698 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1699 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1700
1701 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1702 wait_for_completion(&iwqp->sq_drained);
1703}
1704
1705/**
1706 * i40iw_drain_rq - drain the receive queue
1707 * @ibqp: ib qp pointer
1708 */
1709static void i40iw_drain_rq(struct ib_qp *ibqp)
1710{
1711 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1712 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1713
1714 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1715 wait_for_completion(&iwqp->rq_drained);
1716}
1717
1718/**
Faisal Latifd3749842016-01-20 13:40:09 -06001719 * i40iw_hwreg_mr - send cqp command for memory registration
1720 * @iwdev: iwarp device
1721 * @iwmr: iwarp mr pointer
1722 * @access: access for MR
1723 */
1724static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1725 struct i40iw_mr *iwmr,
1726 u16 access)
1727{
1728 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1729 struct i40iw_reg_ns_stag_info *stag_info;
1730 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1731 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1732 enum i40iw_status_code status;
1733 int err = 0;
1734 struct i40iw_cqp_request *cqp_request;
1735 struct cqp_commands_info *cqp_info;
1736
1737 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1738 if (!cqp_request)
1739 return -ENOMEM;
1740
1741 cqp_info = &cqp_request->info;
1742 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1743 memset(stag_info, 0, sizeof(*stag_info));
1744 stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1745 stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1746 stag_info->stag_key = (u8)iwmr->stag;
1747 stag_info->total_len = iwmr->length;
1748 stag_info->access_rights = access;
1749 stag_info->pd_id = iwpd->sc_pd.pd_id;
1750 stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
Henry Oroscof26c7c82016-11-30 14:57:40 -06001751 stag_info->page_size = iwmr->page_size;
Faisal Latifd3749842016-01-20 13:40:09 -06001752
Henry Oroscob6a529d2016-11-30 14:56:14 -06001753 if (iwpbl->pbl_allocated) {
Faisal Latifd3749842016-01-20 13:40:09 -06001754 if (palloc->level == I40IW_LEVEL_1) {
1755 stag_info->first_pm_pbl_index = palloc->level1.idx;
1756 stag_info->chunk_size = 1;
1757 } else {
1758 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1759 stag_info->chunk_size = 3;
1760 }
1761 } else {
1762 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1763 }
1764
1765 cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1766 cqp_info->post_sq = 1;
1767 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1768 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1769
1770 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1771 if (status) {
1772 err = -ENOMEM;
 1773 i40iw_pr_err("CQP-OP MR Reg fail\n");
1774 }
1775 return err;
1776}
1777
1778/**
1779 * i40iw_reg_user_mr - Register a user memory region
1780 * @pd: ptr of pd
1781 * @start: virtual start address
1782 * @length: length of mr
1783 * @virt: virtual address
1784 * @acc: access of mr
1785 * @udata: user data
1786 */
1787static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1788 u64 start,
1789 u64 length,
1790 u64 virt,
1791 int acc,
1792 struct ib_udata *udata)
1793{
1794 struct i40iw_pd *iwpd = to_iwpd(pd);
1795 struct i40iw_device *iwdev = to_iwdev(pd->device);
1796 struct i40iw_ucontext *ucontext;
1797 struct i40iw_pble_alloc *palloc;
1798 struct i40iw_pbl *iwpbl;
1799 struct i40iw_mr *iwmr;
1800 struct ib_umem *region;
1801 struct i40iw_mem_reg_req req;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001802 u64 pbl_depth = 0;
Faisal Latifd3749842016-01-20 13:40:09 -06001803 u32 stag = 0;
1804 u16 access;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001805 u64 region_length;
Faisal Latifd3749842016-01-20 13:40:09 -06001806 bool use_pbles = false;
1807 unsigned long flags;
1808 int err = -ENOSYS;
Henry Oroscob6a529d2016-11-30 14:56:14 -06001809 int ret;
Henry Oroscof26c7c82016-11-30 14:57:40 -06001810 int pg_shift;
Faisal Latifd3749842016-01-20 13:40:09 -06001811
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001812 if (length > I40IW_MAX_MR_SIZE)
1813 return ERR_PTR(-EINVAL);
Faisal Latifd3749842016-01-20 13:40:09 -06001814 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
1815 if (IS_ERR(region))
1816 return (struct ib_mr *)region;
1817
1818 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1819 ib_umem_release(region);
1820 return ERR_PTR(-EFAULT);
1821 }
1822
1823 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1824 if (!iwmr) {
1825 ib_umem_release(region);
1826 return ERR_PTR(-ENOMEM);
1827 }
1828
1829 iwpbl = &iwmr->iwpbl;
1830 iwpbl->iwmr = iwmr;
1831 iwmr->region = region;
1832 iwmr->ibmr.pd = pd;
1833 iwmr->ibmr.device = pd->device;
1834 ucontext = to_ucontext(pd->uobject->context);
Henry Oroscof26c7c82016-11-30 14:57:40 -06001835
1836 iwmr->page_size = region->page_size;
1837 iwmr->page_msk = PAGE_MASK;
1838
1839 if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
1840 i40iw_set_hugetlb_values(start, iwmr);
1841
1842 region_length = region->length + (start & (iwmr->page_size - 1));
1843 pg_shift = ffs(iwmr->page_size) - 1;
1844 pbl_depth = region_length >> pg_shift;
1845 pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
Faisal Latifd3749842016-01-20 13:40:09 -06001846 iwmr->length = region->length;
1847
1848 iwpbl->user_base = virt;
1849 palloc = &iwpbl->pble_alloc;
1850
1851 iwmr->type = req.reg_type;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001852 iwmr->page_cnt = (u32)pbl_depth;
Faisal Latifd3749842016-01-20 13:40:09 -06001853
1854 switch (req.reg_type) {
1855 case IW_MEMREG_TYPE_QP:
1856 use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1857 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1858 if (err)
1859 goto error;
1860 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1861 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1862 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1863 break;
1864 case IW_MEMREG_TYPE_CQ:
1865 use_pbles = (req.cq_pages > 1);
1866 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1867 if (err)
1868 goto error;
1869
1870 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1871 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1872 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1873 break;
1874 case IW_MEMREG_TYPE_MEM:
Henry Oroscob6a529d2016-11-30 14:56:14 -06001875 use_pbles = (iwmr->page_cnt != 1);
Faisal Latifd3749842016-01-20 13:40:09 -06001876 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1877
Faisal Latifd3749842016-01-20 13:40:09 -06001878 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1879 if (err)
1880 goto error;
1881
Henry Oroscob6a529d2016-11-30 14:56:14 -06001882 if (use_pbles) {
Henry Oroscof26c7c82016-11-30 14:57:40 -06001883 ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
Henry Oroscob6a529d2016-11-30 14:56:14 -06001884 if (ret) {
1885 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1886 iwpbl->pbl_allocated = false;
1887 }
1888 }
1889
Faisal Latifd3749842016-01-20 13:40:09 -06001890 access |= i40iw_get_user_access(acc);
1891 stag = i40iw_create_stag(iwdev);
1892 if (!stag) {
1893 err = -ENOMEM;
1894 goto error;
1895 }
1896
1897 iwmr->stag = stag;
1898 iwmr->ibmr.rkey = stag;
1899 iwmr->ibmr.lkey = stag;
1900
1901 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1902 if (err) {
1903 i40iw_free_stag(iwdev, stag);
1904 goto error;
1905 }
Henry Oroscof26c7c82016-11-30 14:57:40 -06001906
Faisal Latifd3749842016-01-20 13:40:09 -06001907 break;
1908 default:
1909 goto error;
1910 }
1911
1913 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1914 i40iw_add_pdusecount(iwpd);
1915 return &iwmr->ibmr;
1916
1917error:
Henry Oroscob6a529d2016-11-30 14:56:14 -06001918 if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
Faisal Latifd3749842016-01-20 13:40:09 -06001919 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1920 ib_umem_release(region);
1921 kfree(iwmr);
1922 return ERR_PTR(err);
1923}
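
/*
 * This verb is reached from user space via uverbs; for example a
 * libibverbs application registering an ordinary buffer (sketch;
 * buf and len are the application's own):
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * The provider library fills the i40iw_mem_reg_req carried in udata,
 * so reg_type distinguishes QP/CQ shadow areas from plain memory.
 */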
1924
1925/**
1926 * i40iw_reg_phys_mr - register kernel physical memory
1927 * @pd: ibpd pointer
1928 * @addr: physical address of memory to register
1929 * @size: size of memory to register
1930 * @acc: Access rights
1931 * @iova_start: start of virtual address for physical buffers
1932 */
1933struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1934 u64 addr,
1935 u64 size,
1936 int acc,
1937 u64 *iova_start)
1938{
1939 struct i40iw_pd *iwpd = to_iwpd(pd);
1940 struct i40iw_device *iwdev = to_iwdev(pd->device);
1941 struct i40iw_pbl *iwpbl;
1942 struct i40iw_mr *iwmr;
1943 enum i40iw_status_code status;
1944 u32 stag;
1945 u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1946 int ret;
1947
1948 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1949 if (!iwmr)
1950 return ERR_PTR(-ENOMEM);
1951 iwmr->ibmr.pd = pd;
1952 iwmr->ibmr.device = pd->device;
1953 iwpbl = &iwmr->iwpbl;
1954 iwpbl->iwmr = iwmr;
1955 iwmr->type = IW_MEMREG_TYPE_MEM;
1956 iwpbl->user_base = *iova_start;
1957 stag = i40iw_create_stag(iwdev);
1958 if (!stag) {
1959 ret = -EOVERFLOW;
1960 goto err;
1961 }
1962 access |= i40iw_get_user_access(acc);
1963 iwmr->stag = stag;
1964 iwmr->ibmr.rkey = stag;
1965 iwmr->ibmr.lkey = stag;
1966 iwmr->page_cnt = 1;
1967 iwmr->pgaddrmem[0] = addr;
Mustafa Ismail342c3872016-07-12 11:48:40 -05001968 iwmr->length = size;
Faisal Latifd3749842016-01-20 13:40:09 -06001969 status = i40iw_hwreg_mr(iwdev, iwmr, access);
1970 if (status) {
1971 i40iw_free_stag(iwdev, stag);
1972 ret = -ENOMEM;
1973 goto err;
1974 }
1975
1976 i40iw_add_pdusecount(iwpd);
1977 return &iwmr->ibmr;
1978 err:
1979 kfree(iwmr);
1980 return ERR_PTR(ret);
1981}
1982
1983/**
1984 * i40iw_get_dma_mr - register physical mem
1985 * @pd: ptr of pd
1986 * @acc: access for memory
1987 */
1988static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
1989{
1990 u64 kva = 0;
1991
Mustafa Ismail342c3872016-07-12 11:48:40 -05001992 return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
Faisal Latifd3749842016-01-20 13:40:09 -06001993}
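
/*
 * In-kernel consumers no longer call ib_get_dma_mr() directly; they
 * reach this verb by requesting an all-physical rkey at PD allocation
 * time (sketch):
 *
 *	pd = ib_alloc_pd(ibdev, IB_PD_UNSAFE_GLOBAL_RKEY);
 *	rkey = pd->unsafe_global_rkey;
 *
 * in which case the core invokes get_dma_mr with local and remote
 * access rights.
 */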
1994
1995/**
 1996 * i40iw_del_memlist - delete pbl list entries for CQ/QP
1997 * @iwmr: iwmr for IB's user page addresses
1998 * @ucontext: ptr to user context
1999 */
2000static void i40iw_del_memlist(struct i40iw_mr *iwmr,
2001 struct i40iw_ucontext *ucontext)
2002{
2003 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2004 unsigned long flags;
2005
2006 switch (iwmr->type) {
2007 case IW_MEMREG_TYPE_CQ:
2008 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2009 if (!list_empty(&ucontext->cq_reg_mem_list))
2010 list_del(&iwpbl->list);
2011 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2012 break;
2013 case IW_MEMREG_TYPE_QP:
2014 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2015 if (!list_empty(&ucontext->qp_reg_mem_list))
2016 list_del(&iwpbl->list);
2017 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2018 break;
2019 default:
2020 break;
2021 }
2022}
2023
2024/**
2025 * i40iw_dereg_mr - deregister mr
2026 * @ib_mr: mr ptr for dereg
2027 */
2028static int i40iw_dereg_mr(struct ib_mr *ib_mr)
2029{
2030 struct ib_pd *ibpd = ib_mr->pd;
2031 struct i40iw_pd *iwpd = to_iwpd(ibpd);
2032 struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2033 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2034 enum i40iw_status_code status;
2035 struct i40iw_dealloc_stag_info *info;
2036 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2037 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2038 struct i40iw_cqp_request *cqp_request;
2039 struct cqp_commands_info *cqp_info;
2040 u32 stag_idx;
2041
2042 if (iwmr->region)
2043 ib_umem_release(iwmr->region);
2044
2045 if (iwmr->type != IW_MEMREG_TYPE_MEM) {
2046 if (ibpd->uobject) {
2047 struct i40iw_ucontext *ucontext;
2048
2049 ucontext = to_ucontext(ibpd->uobject->context);
2050 i40iw_del_memlist(iwmr, ucontext);
2051 }
2052 if (iwpbl->pbl_allocated)
2053 i40iw_free_pble(iwdev->pble_rsrc, palloc);
Mustafa Ismail433c5812016-08-23 17:24:56 -05002054 kfree(iwmr);
Faisal Latifd3749842016-01-20 13:40:09 -06002055 return 0;
2056 }
2057
2058 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2059 if (!cqp_request)
2060 return -ENOMEM;
2061
2062 cqp_info = &cqp_request->info;
2063 info = &cqp_info->in.u.dealloc_stag.info;
2064 memset(info, 0, sizeof(*info));
2065
2066 info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2067 info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2068 stag_idx = info->stag_idx;
2069 info->mr = true;
2070 if (iwpbl->pbl_allocated)
2071 info->dealloc_pbl = true;
2072
2073 cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2074 cqp_info->post_sq = 1;
2075 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2076 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2077 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2078 if (status)
2079 i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2080 i40iw_rem_pdusecount(iwpd, iwdev);
2081 i40iw_free_stag(iwdev, iwmr->stag);
2082 if (iwpbl->pbl_allocated)
2083 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2084 kfree(iwmr);
2085 return 0;
2086}
2087
2088/**
2089 * i40iw_show_rev
2090 */
2091static ssize_t i40iw_show_rev(struct device *dev,
2092 struct device_attribute *attr, char *buf)
2093{
2094 struct i40iw_ib_device *iwibdev = container_of(dev,
2095 struct i40iw_ib_device,
2096 ibdev.dev);
2097 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2098
2099 return sprintf(buf, "%x\n", hw_rev);
2100}
2101
2102/**
Faisal Latifd3749842016-01-20 13:40:09 -06002103 * i40iw_show_hca
2104 */
2105static ssize_t i40iw_show_hca(struct device *dev,
2106 struct device_attribute *attr, char *buf)
2107{
2108 return sprintf(buf, "I40IW\n");
2109}
2110
2111/**
2112 * i40iw_show_board
2113 */
2114static ssize_t i40iw_show_board(struct device *dev,
2115 struct device_attribute *attr,
2116 char *buf)
2117{
2118 return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2119}
2120
2121static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
Faisal Latifd3749842016-01-20 13:40:09 -06002122static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
2123static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
2124
2125static struct device_attribute *i40iw_dev_attributes[] = {
2126 &dev_attr_hw_rev,
Faisal Latifd3749842016-01-20 13:40:09 -06002127 &dev_attr_hca_type,
2128 &dev_attr_board_id
2129};
2130
2131/**
2132 * i40iw_copy_sg_list - copy sg list for qp
 2133 * @sg_list: destination sg list to copy into
 2134 * @sgl: source ib sge list to copy from
2135 * @num_sges: count of sg entries
2136 */
2137static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2138{
2139 unsigned int i;
2140
2141 for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2142 sg_list[i].tag_off = sgl[i].addr;
2143 sg_list[i].len = sgl[i].length;
2144 sg_list[i].stag = sgl[i].lkey;
2145 }
2146}
2147
2148/**
 2149 * i40iw_post_send - post send wr for kernel application
2150 * @ibqp: qp ptr for wr
2151 * @ib_wr: work request ptr
2152 * @bad_wr: return of bad wr if err
2153 */
2154static int i40iw_post_send(struct ib_qp *ibqp,
2155 struct ib_send_wr *ib_wr,
2156 struct ib_send_wr **bad_wr)
2157{
2158 struct i40iw_qp *iwqp;
2159 struct i40iw_qp_uk *ukqp;
2160 struct i40iw_post_sq_info info;
2161 enum i40iw_status_code ret;
2162 int err = 0;
2163 unsigned long flags;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002164 bool inv_stag;
Faisal Latifd3749842016-01-20 13:40:09 -06002165
2166 iwqp = (struct i40iw_qp *)ibqp;
2167 ukqp = &iwqp->sc_qp.qp_uk;
2168
2169 spin_lock_irqsave(&iwqp->lock, flags);
2170 while (ib_wr) {
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002171 inv_stag = false;
Faisal Latifd3749842016-01-20 13:40:09 -06002172 memset(&info, 0, sizeof(info));
2173 info.wr_id = (u64)(ib_wr->wr_id);
2174 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2175 info.signaled = true;
2176 if (ib_wr->send_flags & IB_SEND_FENCE)
2177 info.read_fence = true;
2178
2179 switch (ib_wr->opcode) {
2180 case IB_WR_SEND:
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002181 /* fall-through */
2182 case IB_WR_SEND_WITH_INV:
2183 if (ib_wr->opcode == IB_WR_SEND) {
2184 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2185 info.op_type = I40IW_OP_TYPE_SEND_SOL;
2186 else
2187 info.op_type = I40IW_OP_TYPE_SEND;
2188 } else {
2189 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2190 info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2191 else
2192 info.op_type = I40IW_OP_TYPE_SEND_INV;
2193 }
Faisal Latifd3749842016-01-20 13:40:09 -06002194
2195 if (ib_wr->send_flags & IB_SEND_INLINE) {
2196 info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2197 info.op.inline_send.len = ib_wr->sg_list[0].length;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002198 ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
Faisal Latifd3749842016-01-20 13:40:09 -06002199 } else {
2200 info.op.send.num_sges = ib_wr->num_sge;
2201 info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002202 ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
Faisal Latifd3749842016-01-20 13:40:09 -06002203 }
2204
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002205 if (ret) {
2206 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2207 err = -ENOMEM;
2208 else
2209 err = -EINVAL;
2210 }
Faisal Latifd3749842016-01-20 13:40:09 -06002211 break;
2212 case IB_WR_RDMA_WRITE:
2213 info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2214
2215 if (ib_wr->send_flags & IB_SEND_INLINE) {
2216 info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2217 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2218 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2219 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2220 info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
2221 ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2222 } else {
2223 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2224 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2225 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2226 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2227 info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
2228 ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2229 }
2230
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002231 if (ret) {
2232 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2233 err = -ENOMEM;
2234 else
2235 err = -EINVAL;
2236 }
Faisal Latifd3749842016-01-20 13:40:09 -06002237 break;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002238 case IB_WR_RDMA_READ_WITH_INV:
2239 inv_stag = true;
 2240 /* fall-through */
Faisal Latifd3749842016-01-20 13:40:09 -06002241 case IB_WR_RDMA_READ:
Shiraz Saleem6c2f7612016-04-22 14:14:27 -05002242 if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2243 err = -EINVAL;
2244 break;
2245 }
Faisal Latifd3749842016-01-20 13:40:09 -06002246 info.op_type = I40IW_OP_TYPE_RDMA_READ;
2247 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2248 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2249 info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
2250 info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2251 info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2252 info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002253 ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002254 if (ret) {
2255 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2256 err = -ENOMEM;
2257 else
2258 err = -EINVAL;
2259 }
Faisal Latifd3749842016-01-20 13:40:09 -06002260 break;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002261 case IB_WR_LOCAL_INV:
2262 info.op_type = I40IW_OP_TYPE_INV_STAG;
2263 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2264 ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
2265 if (ret)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002266 err = -ENOMEM;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002267 break;
2268 case IB_WR_REG_MR:
2269 {
2270 struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002271 int flags = reg_wr(ib_wr)->access;
2272 struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2273 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2274 struct i40iw_fast_reg_stag_info info;
2275
Shiraz Saleem7748e492016-06-14 16:54:19 -05002276 memset(&info, 0, sizeof(info));
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002277 info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
2278 info.access_rights |= i40iw_get_user_access(flags);
2279 info.stag_key = reg_wr(ib_wr)->key & 0xff;
2280 info.stag_idx = reg_wr(ib_wr)->key >> 8;
Henry Oroscoe6779182016-11-09 21:33:32 -06002281 info.page_size = reg_wr(ib_wr)->mr->page_size;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002282 info.wr_id = ib_wr->wr_id;
2283
2284 info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
2285 info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2286 info.total_len = iwmr->ibmr.length;
Shiraz Saleem7748e492016-06-14 16:54:19 -05002287 info.reg_addr_pa = *(u64 *)palloc->level1.addr;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002288 info.first_pm_pbl_index = palloc->level1.idx;
2289 info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2290 info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
2291
Shiraz Saleem7748e492016-06-14 16:54:19 -05002292 if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
2293 info.chunk_size = 1;
2294
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002295 ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2296 if (ret)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002297 err = -ENOMEM;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002298 break;
2299 }
Faisal Latifd3749842016-01-20 13:40:09 -06002300 default:
2301 err = -EINVAL;
 2302 i40iw_pr_err("i40iw_post_send: bad opcode = 0x%x\n",
2303 ib_wr->opcode);
2304 break;
2305 }
2306
2307 if (err)
2308 break;
2309 ib_wr = ib_wr->next;
2310 }
2311
2312 if (err)
2313 *bad_wr = ib_wr;
2314 else
2315 ukqp->ops.iw_qp_post_wr(ukqp);
2316 spin_unlock_irqrestore(&iwqp->lock, flags);
2317
2318 return err;
2319}
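
/*
 * Kernel-side caller sketch for the verb above (qp, sge, remote_va,
 * remote_rkey and the cookie are all the ULP's own):
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_rdma_wr rdma_wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.wr_id      = cookie,
 *		},
 *		.remote_addr = remote_va,
 *		.rkey        = remote_rkey,
 *	};
 *
 *	err = ib_post_send(qp, &rdma_wr.wr, &bad_wr);
 */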
2320
2321/**
2322 * i40iw_post_recv - post receive wr for kernel application
2323 * @ibqp: ib qp pointer
2324 * @ib_wr: work request for receive
 2325 * @bad_wr: returns the wr that caused an error
2326 */
2327static int i40iw_post_recv(struct ib_qp *ibqp,
2328 struct ib_recv_wr *ib_wr,
2329 struct ib_recv_wr **bad_wr)
2330{
2331 struct i40iw_qp *iwqp;
2332 struct i40iw_qp_uk *ukqp;
2333 struct i40iw_post_rq_info post_recv;
2334 struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
2335 enum i40iw_status_code ret = 0;
2336 unsigned long flags;
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002337 int err = 0;
Faisal Latifd3749842016-01-20 13:40:09 -06002338
2339 iwqp = (struct i40iw_qp *)ibqp;
2340 ukqp = &iwqp->sc_qp.qp_uk;
2341
2342 memset(&post_recv, 0, sizeof(post_recv));
2343 spin_lock_irqsave(&iwqp->lock, flags);
2344 while (ib_wr) {
2345 post_recv.num_sges = ib_wr->num_sge;
2346 post_recv.wr_id = ib_wr->wr_id;
2347 i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2348 post_recv.sg_list = sg_list;
2349 ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
2350 if (ret) {
2351 i40iw_pr_err(" post_recv err %d\n", ret);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002352 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2353 err = -ENOMEM;
2354 else
2355 err = -EINVAL;
Faisal Latifd3749842016-01-20 13:40:09 -06002356 *bad_wr = ib_wr;
2357 goto out;
2358 }
2359 ib_wr = ib_wr->next;
2360 }
2361 out:
2362 spin_unlock_irqrestore(&iwqp->lock, flags);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002363 return err;
Faisal Latifd3749842016-01-20 13:40:09 -06002364}
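
/*
 * Matching receive-side sketch (dma_addr, buf_len and cookie are the
 * ULP's own; the buffer must already be DMA-mapped):
 *
 *	struct ib_recv_wr *bad_wr;
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	err = ib_post_recv(qp, &wr, &bad_wr);
 */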
2365
2366/**
2367 * i40iw_poll_cq - poll cq for completion (kernel apps)
2368 * @ibcq: cq to poll
2369 * @num_entries: number of entries to poll
 2370 * @entry: array of work completions to fill
2371 */
2372static int i40iw_poll_cq(struct ib_cq *ibcq,
2373 int num_entries,
2374 struct ib_wc *entry)
2375{
2376 struct i40iw_cq *iwcq;
2377 int cqe_count = 0;
2378 struct i40iw_cq_poll_info cq_poll_info;
2379 enum i40iw_status_code ret;
2380 struct i40iw_cq_uk *ukcq;
2381 struct i40iw_sc_qp *qp;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002382 struct i40iw_qp *iwqp;
Faisal Latifd3749842016-01-20 13:40:09 -06002383 unsigned long flags;
2384
2385 iwcq = (struct i40iw_cq *)ibcq;
2386 ukcq = &iwcq->sc_cq.cq_uk;
2387
2388 spin_lock_irqsave(&iwcq->lock, flags);
2389 while (cqe_count < num_entries) {
Mustafa Ismailb54143b2016-07-12 11:48:42 -05002390 ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
Faisal Latifd3749842016-01-20 13:40:09 -06002391 if (ret == I40IW_ERR_QUEUE_EMPTY) {
2392 break;
Tatyana Nikolovaf8a4e762016-04-22 14:14:28 -05002393 } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
2394 continue;
Faisal Latifd3749842016-01-20 13:40:09 -06002395 } else if (ret) {
2396 if (!cqe_count)
2397 cqe_count = -1;
2398 break;
2399 }
2400 entry->wc_flags = 0;
2401 entry->wr_id = cq_poll_info.wr_id;
Ismail, Mustafadf356302016-04-18 10:33:00 -05002402 if (cq_poll_info.error) {
Faisal Latifd3749842016-01-20 13:40:09 -06002403 entry->status = IB_WC_WR_FLUSH_ERR;
Ismail, Mustafadf356302016-04-18 10:33:00 -05002404 entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
2405 } else {
2406 entry->status = IB_WC_SUCCESS;
2407 }
Faisal Latifd3749842016-01-20 13:40:09 -06002408
2409 switch (cq_poll_info.op_type) {
2410 case I40IW_OP_TYPE_RDMA_WRITE:
2411 entry->opcode = IB_WC_RDMA_WRITE;
2412 break;
2413 case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
2414 case I40IW_OP_TYPE_RDMA_READ:
2415 entry->opcode = IB_WC_RDMA_READ;
2416 break;
2417 case I40IW_OP_TYPE_SEND_SOL:
2418 case I40IW_OP_TYPE_SEND_SOL_INV:
2419 case I40IW_OP_TYPE_SEND_INV:
2420 case I40IW_OP_TYPE_SEND:
2421 entry->opcode = IB_WC_SEND;
2422 break;
2423 case I40IW_OP_TYPE_REC:
2424 entry->opcode = IB_WC_RECV;
2425 break;
2426 default:
2427 entry->opcode = IB_WC_RECV;
2428 break;
2429 }
2430
Faisal Latifd3749842016-01-20 13:40:09 -06002431 entry->ex.imm_data = 0;
2432 qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
2433 entry->qp = (struct ib_qp *)qp->back_qp;
2434 entry->src_qp = cq_poll_info.qp_id;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002435 iwqp = (struct i40iw_qp *)qp->back_qp;
2436 if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2437 if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
2438 complete(&iwqp->sq_drained);
2439 if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
2440 complete(&iwqp->rq_drained);
2441 }
Faisal Latifd3749842016-01-20 13:40:09 -06002442 entry->byte_len = cq_poll_info.bytes_xfered;
2443 entry++;
2444 cqe_count++;
2445 }
2446 spin_unlock_irqrestore(&iwcq->lock, flags);
2447 return cqe_count;
2448}
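
/*
 * Typical consumer loop over this verb (handle_wc() is a hypothetical
 * ULP handler):
 *
 *	struct ib_wc wc[8];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */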
2449
2450/**
2451 * i40iw_req_notify_cq - arm cq kernel application
2452 * @ibcq: cq to arm
 2453 * @notify_flags: notification flags
2454 */
2455static int i40iw_req_notify_cq(struct ib_cq *ibcq,
2456 enum ib_cq_notify_flags notify_flags)
2457{
2458 struct i40iw_cq *iwcq;
2459 struct i40iw_cq_uk *ukcq;
Shiraz Saleem747f1c6d2016-06-14 16:54:16 -05002460 unsigned long flags;
2461 enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
Faisal Latifd3749842016-01-20 13:40:09 -06002462
2463 iwcq = (struct i40iw_cq *)ibcq;
2464 ukcq = &iwcq->sc_cq.cq_uk;
Shiraz Saleem747f1c6d2016-06-14 16:54:16 -05002465 if (notify_flags == IB_CQ_SOLICITED)
2466 cq_notify = IW_CQ_COMPL_SOLICITED;
2467 spin_lock_irqsave(&iwcq->lock, flags);
Faisal Latifd3749842016-01-20 13:40:09 -06002468 ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
Shiraz Saleem747f1c6d2016-06-14 16:54:16 -05002469 spin_unlock_irqrestore(&iwcq->lock, flags);
Faisal Latifd3749842016-01-20 13:40:09 -06002470 return 0;
2471}
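
/*
 * Callers avoid the race between polling and arming by re-polling
 * after each request for notification; the generic verbs pattern is
 * (handle_wc() again hypothetical):
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * i40iw_req_notify_cq() above returns 0 unconditionally, so with this
 * driver the loop performs a single arm pass.
 */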
2472
2473/**
2474 * i40iw_port_immutable - return port's immutable data
2475 * @ibdev: ib dev struct
2476 * @port_num: port number
 2477 * @immutable: immutable port data to return
2478 */
2479static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2480 struct ib_port_immutable *immutable)
2481{
2482 struct ib_port_attr attr;
2483 int err;
2484
2485 err = i40iw_query_port(ibdev, port_num, &attr);
2486
2487 if (err)
2488 return err;
2489
2490 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2491 immutable->gid_tbl_len = attr.gid_tbl_len;
2492 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2493
2494 return 0;
2495}
2496
Christoph Lameterb40f4752016-05-16 12:49:33 -05002497static const char * const i40iw_hw_stat_names[] = {
 2498 /* 32-bit names */
2499 [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2500 [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2501 [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2502 [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2503 [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2504 [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2505 [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2506 [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2507 [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
 2508 /* 64-bit names */
2509 [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2510 "ip4InOctets",
2511 [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2512 "ip4InPkts",
2513 [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2514 "ip4InReasmRqd",
2515 [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2516 "ip4InMcastPkts",
2517 [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2518 "ip4OutOctets",
2519 [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2520 "ip4OutPkts",
2521 [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2522 "ip4OutSegRqd",
2523 [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2524 "ip4OutMcastPkts",
2525 [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2526 "ip6InOctets",
2527 [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2528 "ip6InPkts",
2529 [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2530 "ip6InReasmRqd",
2531 [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2532 "ip6InMcastPkts",
2533 [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2534 "ip6OutOctets",
2535 [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2536 "ip6OutPkts",
2537 [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2538 "ip6OutSegRqd",
2539 [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2540 "ip6OutMcastPkts",
2541 [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
2542 "tcpInSegs",
2543 [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
2544 "tcpOutSegs",
2545 [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2546 "iwInRdmaReads",
2547 [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2548 "iwInRdmaSends",
2549 [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2550 "iwInRdmaWrites",
2551 [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2552 "iwOutRdmaReads",
2553 [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2554 "iwOutRdmaSends",
2555 [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2556 "iwOutRdmaWrites",
2557 [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
2558 "iwRdmaBnd",
2559 [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
2560 "iwRdmaInv"
2561};
2562
Ira Weinyf65c52c2016-06-15 02:21:59 -04002563static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
2564 size_t str_len)
2565{
2566 u32 firmware_version = I40IW_FW_VERSION;
2567
2568 snprintf(str, str_len, "%u.%u", firmware_version,
2569 (firmware_version & 0x000000ff));
2570}
2571
Faisal Latifd3749842016-01-20 13:40:09 -06002572/**
Christoph Lameterb40f4752016-05-16 12:49:33 -05002573 * i40iw_alloc_hw_stats - Allocate a hw stats structure
2574 * @ibdev: device pointer from stack
2575 * @port_num: port number
Faisal Latifd3749842016-01-20 13:40:09 -06002576 */
Christoph Lameterb40f4752016-05-16 12:49:33 -05002577static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
2578 u8 port_num)
2579{
2580 struct i40iw_device *iwdev = to_iwdev(ibdev);
2581 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2582 int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
2583 I40IW_HW_STAT_INDEX_MAX_64;
2584 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2585
2586 BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
2587 (I40IW_HW_STAT_INDEX_MAX_32 +
2588 I40IW_HW_STAT_INDEX_MAX_64));
2589
2590 /*
2591 * PFs get the default update lifespan, but VFs only update once
2592 * per second
2593 */
2594 if (!dev->is_pf)
2595 lifespan = 1000;
2596 return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
2597 lifespan);
2598}
2599
2600/**
2601 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
2602 * @ibdev: device pointer from stack
2603 * @stats: stats pointer from stack
2604 * @port_num: port number
2605 * @index: which hw counter the stack is requesting we update
2606 */
2607static int i40iw_get_hw_stats(struct ib_device *ibdev,
2608 struct rdma_hw_stats *stats,
2609 u8 port_num, int index)
Faisal Latifd3749842016-01-20 13:40:09 -06002610{
2611 struct i40iw_device *iwdev = to_iwdev(ibdev);
2612 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2613 struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
2614 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
Faisal Latifd3749842016-01-20 13:40:09 -06002615 unsigned long flags;
2616
Faisal Latifd3749842016-01-20 13:40:09 -06002617 if (dev->is_pf) {
2618 spin_lock_irqsave(&devstat->stats_lock, flags);
2619 devstat->ops.iw_hw_stat_read_all(devstat,
2620 &devstat->hw_stats);
2621 spin_unlock_irqrestore(&devstat->stats_lock, flags);
2622 } else {
Christoph Lameterb40f4752016-05-16 12:49:33 -05002623 if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
2624 return -ENOSYS;
Faisal Latifd3749842016-01-20 13:40:09 -06002625 }
2626
Christoph Lameterb40f4752016-05-16 12:49:33 -05002627 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
Faisal Latifd3749842016-01-20 13:40:09 -06002628
Christoph Lameterb40f4752016-05-16 12:49:33 -05002629 return stats->num_counters;
Faisal Latifd3749842016-01-20 13:40:09 -06002630}
2631
2632/**
2633 * i40iw_query_gid - Query port GID
2634 * @ibdev: device pointer from stack
2635 * @port: port number
2636 * @index: Entry index
2637 * @gid: Global ID
2638 */
2639static int i40iw_query_gid(struct ib_device *ibdev,
2640 u8 port,
2641 int index,
2642 union ib_gid *gid)
2643{
2644 struct i40iw_device *iwdev = to_iwdev(ibdev);
2645
2646 memset(gid->raw, 0, sizeof(gid->raw));
2647 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
2648 return 0;
2649}
2650
2651/**
 2652 * i40iw_modify_port - modify port properties
2653 * @ibdev: device pointer from stack
2654 * @port: port number
2655 * @port_modify_mask: mask for port modifications
2656 * @props: port properties
2657 */
2658static int i40iw_modify_port(struct ib_device *ibdev,
2659 u8 port,
2660 int port_modify_mask,
2661 struct ib_port_modify *props)
2662{
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002663 return -ENOSYS;
Faisal Latifd3749842016-01-20 13:40:09 -06002664}
2665
2666/**
2667 * i40iw_query_pkey - Query partition key
2668 * @ibdev: device pointer from stack
2669 * @port: port number
2670 * @index: index of pkey
2671 * @pkey: pointer to store the pkey
2672 */
2673static int i40iw_query_pkey(struct ib_device *ibdev,
2674 u8 port,
2675 u16 index,
2676 u16 *pkey)
2677{
2678 *pkey = 0;
2679 return 0;
2680}
2681
2682/**
2683 * i40iw_create_ah - create address handle
2684 * @ibpd: ptr of pd
 2685 * @attr: address handle attributes
2686 */
2687static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
2688 struct ib_ah_attr *attr)
2689{
2690 return ERR_PTR(-ENOSYS);
2691}
2692
2693/**
2694 * i40iw_destroy_ah - Destroy address handle
2695 * @ah: pointer to address handle
2696 */
2697static int i40iw_destroy_ah(struct ib_ah *ah)
2698{
2699 return -ENOSYS;
2700}
2701
2702/**
2703 * i40iw_init_rdma_device - initialization of iwarp device
2704 * @iwdev: iwarp device
2705 */
2706static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
2707{
2708 struct i40iw_ib_device *iwibdev;
2709 struct net_device *netdev = iwdev->netdev;
2710 struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
2711
2712 iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
2713 if (!iwibdev) {
 2714 i40iw_pr_err("iwibdev == NULL\n");
2715 return NULL;
2716 }
2717 strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
2718 iwibdev->ibdev.owner = THIS_MODULE;
2719 iwdev->iwibdev = iwibdev;
2720 iwibdev->iwdev = iwdev;
2721
2722 iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
2723 ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
2724
2725 iwibdev->ibdev.uverbs_cmd_mask =
2726 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2727 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2728 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2729 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2730 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2731 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2732 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2733 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2734 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2735 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2736 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2737 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2738 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2739 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2740 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2741 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2742 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2743 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2744 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2745 (1ull << IB_USER_VERBS_CMD_POST_SEND);
2746 iwibdev->ibdev.phys_port_cnt = 1;
Henry Oroscoe69c5092016-11-09 21:24:48 -06002747 iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
Faisal Latifd3749842016-01-20 13:40:09 -06002748 iwibdev->ibdev.dma_device = &pcidev->dev;
2749 iwibdev->ibdev.dev.parent = &pcidev->dev;
2750 iwibdev->ibdev.query_port = i40iw_query_port;
2751 iwibdev->ibdev.modify_port = i40iw_modify_port;
2752 iwibdev->ibdev.query_pkey = i40iw_query_pkey;
2753 iwibdev->ibdev.query_gid = i40iw_query_gid;
2754 iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
2755 iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
2756 iwibdev->ibdev.mmap = i40iw_mmap;
2757 iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
2758 iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
2759 iwibdev->ibdev.create_qp = i40iw_create_qp;
2760 iwibdev->ibdev.modify_qp = i40iw_modify_qp;
2761 iwibdev->ibdev.query_qp = i40iw_query_qp;
2762 iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
2763 iwibdev->ibdev.create_cq = i40iw_create_cq;
2764 iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
2765 iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
2766 iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
2767 iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
Christoph Lameterb40f4752016-05-16 12:49:33 -05002768 iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
2769 iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
Faisal Latifd3749842016-01-20 13:40:09 -06002770 iwibdev->ibdev.query_device = i40iw_query_device;
2771 iwibdev->ibdev.create_ah = i40iw_create_ah;
2772 iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002773 iwibdev->ibdev.drain_sq = i40iw_drain_sq;
2774 iwibdev->ibdev.drain_rq = i40iw_drain_rq;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002775 iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
2776 iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
Faisal Latifd3749842016-01-20 13:40:09 -06002777 iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
2778 if (!iwibdev->ibdev.iwcm) {
2779 ib_dealloc_device(&iwibdev->ibdev);
2780 i40iw_pr_err("iwcm == NULL\n");
2781 return NULL;
2782 }
2783
2784 iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
2785 iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
2786 iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
2787 iwibdev->ibdev.iwcm->connect = i40iw_connect;
2788 iwibdev->ibdev.iwcm->accept = i40iw_accept;
2789 iwibdev->ibdev.iwcm->reject = i40iw_reject;
2790 iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
2791 iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
Faisal Latif8d8cd0b2016-02-26 09:18:01 -06002792 memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
2793 sizeof(iwibdev->ibdev.iwcm->ifname));
Faisal Latifd3749842016-01-20 13:40:09 -06002794 iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
Ira Weinyf65c52c2016-06-15 02:21:59 -04002795 iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
Faisal Latifd3749842016-01-20 13:40:09 -06002796 iwibdev->ibdev.poll_cq = i40iw_poll_cq;
2797 iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
2798 iwibdev->ibdev.post_send = i40iw_post_send;
2799 iwibdev->ibdev.post_recv = i40iw_post_recv;
Faisal Latif8d8cd0b2016-02-26 09:18:01 -06002800
Faisal Latifd3749842016-01-20 13:40:09 -06002801 return iwibdev;
2802}
2803
2804/**
2805 * i40iw_port_ibevent - indicate port event
2806 * @iwdev: iwarp device
2807 */
2808void i40iw_port_ibevent(struct i40iw_device *iwdev)
2809{
2810 struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
2811 struct ib_event event;
2812
2813 event.device = &iwibdev->ibdev;
2814 event.element.port_num = 1;
2815 event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2816 ib_dispatch_event(&event);
2817}
2818
2819/**
2820 * i40iw_unregister_rdma_device - unregister of iwarp from IB
2821 * @iwibdev: rdma device ptr
2822 */
2823static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
2824{
2825 int i;
2826
2827 for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
2828 device_remove_file(&iwibdev->ibdev.dev,
2829 i40iw_dev_attributes[i]);
2830 ib_unregister_device(&iwibdev->ibdev);
2831}
2832
2833/**
2834 * i40iw_destroy_rdma_device - destroy rdma device and free resources
2835 * @iwibdev: IB device ptr
2836 */
2837void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
2838{
2839 if (!iwibdev)
2840 return;
2841
2842 i40iw_unregister_rdma_device(iwibdev);
2843 kfree(iwibdev->ibdev.iwcm);
2844 iwibdev->ibdev.iwcm = NULL;
2845 ib_dealloc_device(&iwibdev->ibdev);
2846}
2847
2848/**
2849 * i40iw_register_rdma_device - register iwarp device to IB
2850 * @iwdev: iwarp device
2851 */
2852int i40iw_register_rdma_device(struct i40iw_device *iwdev)
2853{
2854 int i, ret;
2855 struct i40iw_ib_device *iwibdev;
2856
2857 iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
2858 if (!iwdev->iwibdev)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002859 return -ENOMEM;
Faisal Latifd3749842016-01-20 13:40:09 -06002860 iwibdev = iwdev->iwibdev;
2861
2862 ret = ib_register_device(&iwibdev->ibdev, NULL);
2863 if (ret)
2864 goto error;
2865
2866 for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
2867 ret =
2868 device_create_file(&iwibdev->ibdev.dev,
2869 i40iw_dev_attributes[i]);
2870 if (ret) {
2871 while (i > 0) {
2872 i--;
2873 device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
2874 }
2875 ib_unregister_device(&iwibdev->ibdev);
2876 goto error;
2877 }
2878 }
2879 return 0;
2880error:
2881 kfree(iwdev->iwibdev->ibdev.iwcm);
2882 iwdev->iwibdev->ibdev.iwcm = NULL;
2883 ib_dealloc_device(&iwdev->iwibdev->ibdev);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002884 return ret;
Faisal Latifd3749842016-01-20 13:40:09 -06002885}