/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp - iwdev->used_qps;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
	props->max_pd = iwdev->max_pd - iwdev->used_pds;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
	return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning port attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);

	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
		return ERR_PTR(-EINVAL);
	}

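	/*
	 * ABI negotiation note: the response echoes the version that the
	 * provider library requested (any value between 4 and I40IW_ABI_VER
	 * is accepted above), so older userspace keeps working against a
	 * newer kernel. The negotiated version is stored in the ucontext
	 * below and later passed to pd_init().
	 */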
	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = req.userspace_ver;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

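	/*
	 * Offset 0 within the BAR window selects the doorbell page; any
	 * non-zero page offset was rebased above onto the first push page
	 * for this function (PF or VF). A sketch of the resulting layout,
	 * assuming the offsets defined in i40iw.h:
	 *
	 *   pgoff == db_addr_offset >> PAGE_SHIFT -> doorbell page, uncached
	 *   push pages at an odd index            -> uncached
	 *   push pages at an even index           -> write-combined
	 */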
	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

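/*
 * The push-page helpers below follow the driver's common CQP (control QP)
 * command pattern: build a cqp_commands_info inside an i40iw_cqp_request,
 * post it with i40iw_handle_cqp_op(), then read the completion. A minimal
 * sketch of that pattern, assuming a command "OP_FOO" with an in.u.foo
 * member (hypothetical names, for illustration only):
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_FOO;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.foo.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 */
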
/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	struct i40iw_ucontext *ucontext;
	u32 pd_id = 0;
	int err;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;

	if (context) {
		ucontext = to_ucontext(context);
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;
error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}

/**
 * i40iw_qp_roundup - round up qp ring size to the next power of two
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;

	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}
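
/*
 * Worked example for the bit-smearing loop above: for wr_ring_size = 100,
 * the loop operates on 99 (0b1100011) and ORs in right shifts of 1, 2, 4,
 * 8 and 16 bits, leaving 127 (every bit below the highest set bit filled);
 * the final increment returns 128, the next power of two. Shifting by up
 * to 16 covers every set bit of a value that fits in 32 bits.
 */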

/**
 * i40iw_get_pbl - retrieve pbl from a list given a virtual address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 *
 * Removes the matching entry from the list and returns it; the caller
 * takes ownership of the pbl.
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;

	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	if (status)
		return -ENOMEM;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;

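	/*
	 * Sizing sketch (illustrative numbers, not taken from hardware
	 * documentation): with sq_size = 128 and sqshift = 1, sqdepth is
	 * 256 minimum-size WQEs; with rq_size = 128 and a RQ WQE shift of
	 * 2, rqdepth is 512. The tracking allocation below holds one
	 * i40iw_sq_uk_wr_trk_info per SQ WQE plus one u64 wrid per RQ WQE
	 * (the "rqdepth << 3" bytes).
	 */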
	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -EINVAL;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);
	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_udata\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				struct i40iw_pbl *iwpbl;

				spin_lock_irqsave(
				    &ucontext->qp_reg_mem_list_lock, flags);
				iwpbl = i40iw_get_pbl(
				    (unsigned long)req.user_wqe_buffers,
				    &ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
				    &ucontext->qp_reg_mem_list_lock, flags);

				if (!iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
				memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	i40iw_add_devusecount(iwdev);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}
	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->port_num = 1;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	init_attr->port_num = 1;
	return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}

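/*
 * i40iw_modify_qp() below maps IB verb QP states onto the iWARP QP state
 * machine. A rough sketch of the mapping it implements, taken from the
 * switch statement that follows (iWARP states are the driver's
 * I40IW_QP_STATE_* values):
 *
 *   IB_QPS_INIT/RTR  -> I40IW_QP_STATE_IDLE
 *   IB_QPS_RTS       -> I40IW_QP_STATE_RTS (requires an established cm_id)
 *   IB_QPS_SQD       -> I40IW_QP_STATE_CLOSING
 *   IB_QPS_SQE       -> I40IW_QP_STATE_TERMINATE
 *   IB_QPS_ERR/RESET -> I40IW_QP_STATE_ERROR
 */
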
/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	u32 err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
			err = -EINVAL;
			goto exit;
		}

		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				i40iw_terminate_del_timer(&iwqp->sc_qp);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
				i40iw_cm_disconn(iwqp);
			}
		} else {
			spin_lock_irqsave(&iwqp->lock, flags);
			if (iwqp->cm_id) {
				if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
					iwqp->cm_id->add_ref(iwqp->cm_id);
					i40iw_schedule_cm_timer(iwqp->cm_node,
								(struct i40iw_puda_buf *)iwqp,
								I40IW_TIMER_TYPE_CLOSE, 1, 0);
				}
			}
			spin_unlock_irqrestore(&iwqp->lock, flags);
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * i40iw_cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	i40iw_cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	i40iw_rem_devusecount(iwdev);
	return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	info.dev = dev;
	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	info.ceqe_mask = 0;
	if (attr->comp_vector < iwdev->ceqs_count)
		info.ceq_id = attr->comp_vector;
	info.ceq_id_valid = true;
	info.ceqe_mask = 1;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
			err_code = -EFAULT;
			goto cq_free_resources;
		}

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	i40iw_add_devusecount(iwdev);
	return (struct ib_cq *)iwcq;

cq_destroy:
	i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
	i40iw_rem_devusecount(iwdev);
}

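/*
 * STag layout sketch (inferred from the shift/mask use in i40iw_free_stag()
 * above and i40iw_create_stag() below): the bits above
 * I40IW_CQPSQ_STAG_IDX_SHIFT that fall inside mr_stagmask hold the MR
 * index, the remaining high bits hold a random driver key, and the low
 * 8 bits hold a random consumer key, so repeated registrations of the
 * same index yield different STag values.
 */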
/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
		i40iw_add_devusecount(iwdev);
	}
	return stag;
}

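/*
 * The helpers below walk a physical buffer list (PBL). Large registrations
 * use PBLE chunks described by i40iw_pble_info records;
 * i40iw_next_pbl_addr() advances within the current chunk and hops to the
 * next chunk's address once *idx reaches the chunk's entry count. A
 * minimal usage sketch, assuming pinfo/idx were initialized as in
 * i40iw_copy_user_pgaddrs():
 *
 *	*pbl = cpu_to_le64(pg_addr);
 *	pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 */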
/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses into the local pble list
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u64 pg_addr = 0;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> region->page_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			pg_addr = sg_dma_address(sg) +
				(i << region->page_shift);

			if ((entry + i) == 0)
				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
			else if (!(pg_addr & ~iwmr->page_msk))
				*pbl = cpu_to_le64(pg_addr);
			else
				continue;
			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
		}
	}
}

/**
 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values
 * @addr: virtual address
 * @iwmr: mr pointer for this memory registration
 */
static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
{
	struct vm_area_struct *vma;
	struct hstate *h;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma)) {
		h = hstate_vma(vma);
		if (huge_page_size(h) == 0x200000) {
			iwmr->page_size = huge_page_size(h);
			iwmr->page_msk = huge_page_mask(h);
		}
	}
}

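/*
 * Contiguity checks: a level-1 PBL whose entries all sit pg_size apart
 * describes one physically contiguous region, which lets the MR be
 * programmed with a base address instead of a page list. Illustrative
 * example (hypothetical addresses): for pg_size = 4096, the array
 * { 0x10000, 0x11000, 0x12000 } passes i40iw_check_mem_contiguous(),
 * while { 0x10000, 0x13000 } fails.
 */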
/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}
	return true;
}

/**
 * i40iw_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == I40IW_LEVEL_1) {
		arr = (u64 *)palloc->level1.addr;
		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
		return ret;
	}

	start_addr = (u64 *)leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = (u64 *)leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}

/**
 * i40iw_setup_pbles - copy user page addresses to pble's
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag to use pble's
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
			     bool use_pbles)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	u64 *pbl;
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
		mutex_unlock(&iwdev->pbl_mutex);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
		pbl = (u64 *)pinfo->addr;
	} else {
		pbl = iwmr->pgaddrmem;
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);

	if (use_pbles)
		iwmr->pgaddrmem[0] = *pbl;

	return 0;
}

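/*
 * Layout note for i40iw_handle_q_mem() below: the user registers one
 * buffer covering the work queues plus a shadow area, in page order
 * SQ pages then RQ pages (QP case) or CQ pages (CQ case), with the shadow
 * page last at index req->sq_pages + req->rq_pages + req->cq_pages. When
 * the pages turn out to be physically contiguous, the pble allocation is
 * released again and the base addresses are programmed directly.
 */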
/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pble
 */
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
			      struct i40iw_mem_reg_req *req,
			      struct i40iw_pbl *iwpbl,
			      bool use_pbles)
{
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_mr *iwmr = iwpbl->iwmr;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	u32 pg_size;
	int err;
	int total;
	bool ret = true;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
	pg_size = iwmr->page_size;

	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;

	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
		return -ENOMEM;
	}

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;

	if (iwmr->type == IW_MEMREG_TYPE_QP) {
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles) {
			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
			if (ret)
				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
		}

		if (!ret) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[req->sq_pages];
		}
	} else { /* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles)
			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

		if (!ret)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}

	if (use_pbles && ret) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
	}

	return err;
}

/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 */
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
	struct i40iw_allocate_stag_info *info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.alloc_stag.info;
	memset(info, 0, sizeof(*info));
	info->page_size = PAGE_SIZE;
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP Alloc STag fail");
	}
	return err;
}

1630/**
1631 * i40iw_alloc_mr - register stag for fast memory registration
1632 * @pd: ibpd pointer
1633 * @mr_type: memory for stag registrion
1634 * @max_num_sg: man number of pages
1635 */
1636static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
1637 enum ib_mr_type mr_type,
1638 u32 max_num_sg)
1639{
1640 struct i40iw_pd *iwpd = to_iwpd(pd);
1641 struct i40iw_device *iwdev = to_iwdev(pd->device);
1642 struct i40iw_pble_alloc *palloc;
1643 struct i40iw_pbl *iwpbl;
1644 struct i40iw_mr *iwmr;
1645 enum i40iw_status_code status;
1646 u32 stag;
1647 int err_code = -ENOMEM;
1648
1649 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1650 if (!iwmr)
1651 return ERR_PTR(-ENOMEM);
1652
1653 stag = i40iw_create_stag(iwdev);
1654 if (!stag) {
1655 err_code = -EOVERFLOW;
1656 goto err;
1657 }
1658 iwmr->stag = stag;
1659 iwmr->ibmr.rkey = stag;
1660 iwmr->ibmr.lkey = stag;
1661 iwmr->ibmr.pd = pd;
1662 iwmr->ibmr.device = pd->device;
1663 iwpbl = &iwmr->iwpbl;
1664 iwpbl->iwmr = iwmr;
1665 iwmr->type = IW_MEMREG_TYPE_MEM;
1666 palloc = &iwpbl->pble_alloc;
1667 iwmr->page_cnt = max_num_sg;
1668 mutex_lock(&iwdev->pbl_mutex);
1669 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1670 mutex_unlock(&iwdev->pbl_mutex);
Faisal Latifee23abd2016-06-14 16:54:17 -05001671 if (status)
Ismail, Mustafab7aee852016-04-18 10:33:06 -05001672 goto err1;
1673
1674 if (palloc->level != I40IW_LEVEL_1)
1675 goto err2;
1676 err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1677 if (err_code)
1678 goto err2;
1679 iwpbl->pbl_allocated = true;
1680 i40iw_add_pdusecount(iwpd);
1681 return &iwmr->ibmr;
1682err2:
1683 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1684err1:
1685 i40iw_free_stag(iwdev, stag);
1686err:
1687 kfree(iwmr);
1688 return ERR_PTR(err_code);
1689}
1690
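/*
 * Illustrative sketch, assuming a kernel consumer: this entry point is
 * reached through the core verbs API rather than called directly.
 * ib_alloc_mr() is the real core call; "pd" and "nents" are hypothetical.
 */
static struct ib_mr *example_alloc_fastreg_mr(struct ib_pd *pd, u32 nents)
{
	/* Dispatches to i40iw_alloc_mr() on an i40iw device. */
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
}
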
1691/**
1692 * i40iw_set_page - populate pbl list for fmr
1693 * @ibmr: ib mem to access iwarp mr pointer
1694 * @addr: page dma address for pbl list
1695 */
1696static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1697{
1698 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1699 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1700 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1701 u64 *pbl;
1702
1703 if (unlikely(iwmr->npages == iwmr->page_cnt))
1704 return -ENOMEM;
1705
1706 pbl = (u64 *)palloc->level1.addr;
1707 pbl[iwmr->npages++] = cpu_to_le64(addr);
1708 return 0;
1709}
1710
1711/**
1712 * i40iw_map_mr_sg - map sg list for fmr
1713 * @ibmr: ib mem to access iwarp mr pointer
1714 * @sg: scatter gather list for fmr
1715 * @sg_nents: number of sg entries
 * @sg_offset: offset into the first sg entry
1716 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001717static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001718 int sg_nents, unsigned int *sg_offset)
Ismail, Mustafab7aee852016-04-18 10:33:06 -05001719{
1720 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1721
1722 iwmr->npages = 0;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001723 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
Ismail, Mustafab7aee852016-04-18 10:33:06 -05001724}
1725
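/*
 * Illustrative sketch, assuming a kernel consumer: before posting an
 * IB_WR_REG_MR work request, a ULP maps its scatterlist into the MR via
 * the core ib_map_mr_sg(), which lands in i40iw_map_mr_sg() above. The
 * "sg"/"sg_nents" values are hypothetical.
 */
static int example_map_fastreg_mr(struct ib_mr *mr, struct scatterlist *sg,
				  int sg_nents)
{
	int mapped = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);

	if (mapped < 0)
		return mapped;
	return mapped == sg_nents ? 0 : -EINVAL;
}
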
1726/**
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05001727 * i40iw_drain_sq - drain the send queue
1728 * @ibqp: ib qp pointer
1729 */
1730static void i40iw_drain_sq(struct ib_qp *ibqp)
1731{
1732 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1733 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1734
1735 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1736 wait_for_completion(&iwqp->sq_drained);
1737}
1738
1739/**
1740 * i40iw_drain_rq - drain the receive queue
1741 * @ibqp: ib qp pointer
1742 */
1743static void i40iw_drain_rq(struct ib_qp *ibqp)
1744{
1745 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1746 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1747
1748 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1749 wait_for_completion(&iwqp->rq_drained);
1750}
1751
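/*
 * Illustrative sketch: the two callbacks above back the generic
 * ib_drain_sq()/ib_drain_rq() helpers, so a consumer normally just calls
 * ib_drain_qp() before tearing a QP down; "qp" is hypothetical.
 */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* invokes i40iw_drain_sq()/i40iw_drain_rq() */
	ib_destroy_qp(qp);
}
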
1752/**
Faisal Latifd3749842016-01-20 13:40:09 -06001753 * i40iw_hwreg_mr - send cqp command for memory registration
1754 * @iwdev: iwarp device
1755 * @iwmr: iwarp mr pointer
1756 * @access: access for MR
1757 */
1758static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1759 struct i40iw_mr *iwmr,
1760 u16 access)
1761{
1762 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1763 struct i40iw_reg_ns_stag_info *stag_info;
1764 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1765 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1766 enum i40iw_status_code status;
1767 int err = 0;
1768 struct i40iw_cqp_request *cqp_request;
1769 struct cqp_commands_info *cqp_info;
1770
1771 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1772 if (!cqp_request)
1773 return -ENOMEM;
1774
1775 cqp_info = &cqp_request->info;
1776 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1777 memset(stag_info, 0, sizeof(*stag_info));
1778 stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1779 stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1780 stag_info->stag_key = (u8)iwmr->stag;
1781 stag_info->total_len = iwmr->length;
1782 stag_info->access_rights = access;
1783 stag_info->pd_id = iwpd->sc_pd.pd_id;
1784 stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
Henry Oroscof26c7c82016-11-30 14:57:40 -06001785 stag_info->page_size = iwmr->page_size;
Faisal Latifd3749842016-01-20 13:40:09 -06001786
Henry Oroscob6a529d2016-11-30 14:56:14 -06001787 if (iwpbl->pbl_allocated) {
Faisal Latifd3749842016-01-20 13:40:09 -06001788 if (palloc->level == I40IW_LEVEL_1) {
1789 stag_info->first_pm_pbl_index = palloc->level1.idx;
1790 stag_info->chunk_size = 1;
1791 } else {
1792 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1793 stag_info->chunk_size = 3;
1794 }
1795 } else {
1796 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1797 }
1798
1799 cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1800 cqp_info->post_sq = 1;
1801 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1802 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1803
1804 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1805 if (status) {
1806 err = -ENOMEM;
1807 i40iw_pr_err("CQP-OP MR Reg fail\n");
1808 }
1809 return err;
1810}
1811
1812/**
1813 * i40iw_reg_user_mr - Register a user memory region
1814 * @pd: ptr of pd
1815 * @start: virtual start address
1816 * @length: length of mr
1817 * @virt: virtual address
1818 * @acc: access of mr
1819 * @udata: user data
1820 */
1821static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1822 u64 start,
1823 u64 length,
1824 u64 virt,
1825 int acc,
1826 struct ib_udata *udata)
1827{
1828 struct i40iw_pd *iwpd = to_iwpd(pd);
1829 struct i40iw_device *iwdev = to_iwdev(pd->device);
1830 struct i40iw_ucontext *ucontext;
1831 struct i40iw_pble_alloc *palloc;
1832 struct i40iw_pbl *iwpbl;
1833 struct i40iw_mr *iwmr;
1834 struct ib_umem *region;
1835 struct i40iw_mem_reg_req req;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001836 u64 pbl_depth = 0;
Faisal Latifd3749842016-01-20 13:40:09 -06001837 u32 stag = 0;
1838 u16 access;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001839 u64 region_length;
Faisal Latifd3749842016-01-20 13:40:09 -06001840 bool use_pbles = false;
1841 unsigned long flags;
1842 int err = -ENOSYS;
Henry Oroscob6a529d2016-11-30 14:56:14 -06001843 int ret;
Henry Oroscof26c7c82016-11-30 14:57:40 -06001844 int pg_shift;
Faisal Latifd3749842016-01-20 13:40:09 -06001845
Mustafa Ismaild5965932016-11-30 14:59:26 -06001846 if (iwdev->closing)
1847 return ERR_PTR(-ENODEV);
Faisal Latifd3749842016-01-20 13:40:09 -06001848
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001849 if (length > I40IW_MAX_MR_SIZE)
1850 return ERR_PTR(-EINVAL);
Faisal Latifd3749842016-01-20 13:40:09 -06001851 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
1852 if (IS_ERR(region))
1853 return (struct ib_mr *)region;
1854
1855 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1856 ib_umem_release(region);
1857 return ERR_PTR(-EFAULT);
1858 }
1859
1860 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1861 if (!iwmr) {
1862 ib_umem_release(region);
1863 return ERR_PTR(-ENOMEM);
1864 }
1865
1866 iwpbl = &iwmr->iwpbl;
1867 iwpbl->iwmr = iwmr;
1868 iwmr->region = region;
1869 iwmr->ibmr.pd = pd;
1870 iwmr->ibmr.device = pd->device;
1871 ucontext = to_ucontext(pd->uobject->context);
Henry Oroscof26c7c82016-11-30 14:57:40 -06001872
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03001873 iwmr->page_size = PAGE_SIZE;
Henry Oroscof26c7c82016-11-30 14:57:40 -06001874 iwmr->page_msk = PAGE_MASK;
1875
1876 if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
1877 i40iw_set_hugetlb_values(start, iwmr);
1878
1879 region_length = region->length + (start & (iwmr->page_size - 1));
1880 pg_shift = ffs(iwmr->page_size) - 1;
1881 pbl_depth = region_length >> pg_shift;
1882 pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
Faisal Latifd3749842016-01-20 13:40:09 -06001883 iwmr->length = region->length;
1884
1885 iwpbl->user_base = virt;
1886 palloc = &iwpbl->pble_alloc;
1887
1888 iwmr->type = req.reg_type;
Ismail, Mustafa6b900362016-04-18 10:32:54 -05001889 iwmr->page_cnt = (u32)pbl_depth;
Faisal Latifd3749842016-01-20 13:40:09 -06001890
1891 switch (req.reg_type) {
1892 case IW_MEMREG_TYPE_QP:
1893 use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1894 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1895 if (err)
1896 goto error;
1897 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1898 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1899 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1900 break;
1901 case IW_MEMREG_TYPE_CQ:
1902 use_pbles = (req.cq_pages > 1);
1903 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1904 if (err)
1905 goto error;
1906
1907 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1908 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1909 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1910 break;
1911 case IW_MEMREG_TYPE_MEM:
Henry Oroscob6a529d2016-11-30 14:56:14 -06001912 use_pbles = (iwmr->page_cnt != 1);
Faisal Latifd3749842016-01-20 13:40:09 -06001913 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1914
Faisal Latifd3749842016-01-20 13:40:09 -06001915 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1916 if (err)
1917 goto error;
1918
Henry Oroscob6a529d2016-11-30 14:56:14 -06001919 if (use_pbles) {
Henry Oroscof26c7c82016-11-30 14:57:40 -06001920 ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
Henry Oroscob6a529d2016-11-30 14:56:14 -06001921 if (ret) {
1922 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1923 iwpbl->pbl_allocated = false;
1924 }
1925 }
1926
Faisal Latifd3749842016-01-20 13:40:09 -06001927 access |= i40iw_get_user_access(acc);
1928 stag = i40iw_create_stag(iwdev);
1929 if (!stag) {
1930 err = -ENOMEM;
1931 goto error;
1932 }
1933
1934 iwmr->stag = stag;
1935 iwmr->ibmr.rkey = stag;
1936 iwmr->ibmr.lkey = stag;
1937
1938 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1939 if (err) {
1940 i40iw_free_stag(iwdev, stag);
1941 goto error;
1942 }
Henry Oroscof26c7c82016-11-30 14:57:40 -06001943
Faisal Latifd3749842016-01-20 13:40:09 -06001944 break;
1945 default:
1946 goto error;
1947 }
1948
1949 iwmr->type = req.reg_type;
1950 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1951 i40iw_add_pdusecount(iwpd);
1952 return &iwmr->ibmr;
1953
1954error:
Henry Oroscob6a529d2016-11-30 14:56:14 -06001955 if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
Faisal Latifd3749842016-01-20 13:40:09 -06001956 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1957 ib_umem_release(region);
1958 kfree(iwmr);
1959 return ERR_PTR(err);
1960}
1961
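/*
 * Illustrative sketch, userspace side: i40iw_reg_user_mr() is the kernel
 * half of an ibv_reg_mr() call made through libibverbs, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * ibv_reg_mr() and the access flags are standard verbs user API; "pd",
 * "buf" and "len" are hypothetical.
 */
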
1962/**
1963 * i40iw_reg_phys_mr - register kernel physical memory
1964 * @pd: ibpd pointer
1965 * @addr: physical address of memory to register
1966 * @size: size of memory to register
1967 * @acc: Access rights
1968 * @iova_start: start of virtual address for physical buffers
1969 */
1970struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1971 u64 addr,
1972 u64 size,
1973 int acc,
1974 u64 *iova_start)
1975{
1976 struct i40iw_pd *iwpd = to_iwpd(pd);
1977 struct i40iw_device *iwdev = to_iwdev(pd->device);
1978 struct i40iw_pbl *iwpbl;
1979 struct i40iw_mr *iwmr;
1980 enum i40iw_status_code status;
1981 u32 stag;
1982 u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1983 int ret;
1984
1985 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1986 if (!iwmr)
1987 return ERR_PTR(-ENOMEM);
1988 iwmr->ibmr.pd = pd;
1989 iwmr->ibmr.device = pd->device;
1990 iwpbl = &iwmr->iwpbl;
1991 iwpbl->iwmr = iwmr;
1992 iwmr->type = IW_MEMREG_TYPE_MEM;
1993 iwpbl->user_base = *iova_start;
1994 stag = i40iw_create_stag(iwdev);
1995 if (!stag) {
1996 ret = -EOVERFLOW;
1997 goto err;
1998 }
1999 access |= i40iw_get_user_access(acc);
2000 iwmr->stag = stag;
2001 iwmr->ibmr.rkey = stag;
2002 iwmr->ibmr.lkey = stag;
2003 iwmr->page_cnt = 1;
2004 iwmr->pgaddrmem[0] = addr;
Mustafa Ismail342c3872016-07-12 11:48:40 -05002005 iwmr->length = size;
Faisal Latifd3749842016-01-20 13:40:09 -06002006 status = i40iw_hwreg_mr(iwdev, iwmr, access);
2007 if (status) {
2008 i40iw_free_stag(iwdev, stag);
2009 ret = -ENOMEM;
2010 goto err;
2011 }
2012
2013 i40iw_add_pdusecount(iwpd);
2014 return &iwmr->ibmr;
2015 err:
2016 kfree(iwmr);
2017 return ERR_PTR(ret);
2018}
2019
2020/**
2021 * i40iw_get_dma_mr - register physical mem
2022 * @pd: ptr of pd
2023 * @acc: access for memory
2024 */
2025static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
2026{
2027 u64 kva = 0;
2028
Mustafa Ismail342c3872016-07-12 11:48:40 -05002029 return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
Faisal Latifd3749842016-01-20 13:40:09 -06002030}
2031
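/*
 * Note (assumption about core behaviour): consumers do not call this
 * directly. When a device does not advertise IB_DEVICE_LOCAL_DMA_LKEY,
 * ib_alloc_pd() uses the get_dma_mr callback to create the PD's internal
 * all-physical MR, which is what the zero base address above provides.
 */
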
2032/**
2033 * i40iw_del_memlist - delete pbl list entries for CQ/QP
2034 * @iwmr: iwmr for IB's user page addresses
2035 * @ucontext: ptr to user context
2036 */
2037static void i40iw_del_memlist(struct i40iw_mr *iwmr,
2038 struct i40iw_ucontext *ucontext)
2039{
2040 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2041 unsigned long flags;
2042
2043 switch (iwmr->type) {
2044 case IW_MEMREG_TYPE_CQ:
2045 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2046 if (!list_empty(&ucontext->cq_reg_mem_list))
2047 list_del(&iwpbl->list);
2048 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2049 break;
2050 case IW_MEMREG_TYPE_QP:
2051 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2052 if (!list_empty(&ucontext->qp_reg_mem_list))
2053 list_del(&iwpbl->list);
2054 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2055 break;
2056 default:
2057 break;
2058 }
2059}
2060
2061/**
2062 * i40iw_dereg_mr - deregister mr
2063 * @ib_mr: mr ptr for dereg
2064 */
2065static int i40iw_dereg_mr(struct ib_mr *ib_mr)
2066{
2067 struct ib_pd *ibpd = ib_mr->pd;
2068 struct i40iw_pd *iwpd = to_iwpd(ibpd);
2069 struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2070 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2071 enum i40iw_status_code status;
2072 struct i40iw_dealloc_stag_info *info;
2073 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2074 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2075 struct i40iw_cqp_request *cqp_request;
2076 struct cqp_commands_info *cqp_info;
2077 u32 stag_idx;
2078
2079 if (iwmr->region)
2080 ib_umem_release(iwmr->region);
2081
2082 if (iwmr->type != IW_MEMREG_TYPE_MEM) {
2083 if (ibpd->uobject) {
2084 struct i40iw_ucontext *ucontext;
2085
2086 ucontext = to_ucontext(ibpd->uobject->context);
2087 i40iw_del_memlist(iwmr, ucontext);
2088 }
Tatyana Nikolovaaf56e532017-07-05 21:25:33 -05002089 if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
Faisal Latifd3749842016-01-20 13:40:09 -06002090 i40iw_free_pble(iwdev->pble_rsrc, palloc);
Mustafa Ismail433c5812016-08-23 17:24:56 -05002091 kfree(iwmr);
Faisal Latifd3749842016-01-20 13:40:09 -06002092 return 0;
2093 }
2094
2095 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2096 if (!cqp_request)
2097 return -ENOMEM;
2098
2099 cqp_info = &cqp_request->info;
2100 info = &cqp_info->in.u.dealloc_stag.info;
2101 memset(info, 0, sizeof(*info));
2102
2103 info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2104 info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2105 stag_idx = info->stag_idx;
2106 info->mr = true;
2107 if (iwpbl->pbl_allocated)
2108 info->dealloc_pbl = true;
2109
2110 cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2111 cqp_info->post_sq = 1;
2112 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2113 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2114 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2115 if (status)
2116 i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2117 i40iw_rem_pdusecount(iwpd, iwdev);
2118 i40iw_free_stag(iwdev, iwmr->stag);
2119 if (iwpbl->pbl_allocated)
2120 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2121 kfree(iwmr);
2122 return 0;
2123}
2124
2125/**
2126 * i40iw_show_rev - display hardware revision
2127 */
2128static ssize_t i40iw_show_rev(struct device *dev,
2129 struct device_attribute *attr, char *buf)
2130{
2131 struct i40iw_ib_device *iwibdev = container_of(dev,
2132 struct i40iw_ib_device,
2133 ibdev.dev);
2134 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2135
2136 return sprintf(buf, "%x\n", hw_rev);
2137}
2138
2139/**
Faisal Latifd3749842016-01-20 13:40:09 -06002140 * i40iw_show_hca - display HCA type
2141 */
2142static ssize_t i40iw_show_hca(struct device *dev,
2143 struct device_attribute *attr, char *buf)
2144{
2145 return sprintf(buf, "I40IW\n");
2146}
2147
2148/**
2149 * i40iw_show_board - display board ID
2150 */
2151static ssize_t i40iw_show_board(struct device *dev,
2152 struct device_attribute *attr,
2153 char *buf)
2154{
2155 return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2156}
2157
2158static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
Faisal Latifd3749842016-01-20 13:40:09 -06002159static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
2160static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
2161
2162static struct device_attribute *i40iw_dev_attributes[] = {
2163 &dev_attr_hw_rev,
Faisal Latifd3749842016-01-20 13:40:09 -06002164 &dev_attr_hca_type,
2165 &dev_attr_board_id
2166};
2167
2168/**
2169 * i40iw_copy_sg_list - copy sg list for qp
2170 * @sg_list: copied into sg_list
2171 * @sgl: copy from sgl
2172 * @num_sges: count of sg entries
2173 */
2174static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2175{
2176 unsigned int i;
2177
2178 for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2179 sg_list[i].tag_off = sgl[i].addr;
2180 sg_list[i].len = sgl[i].length;
2181 sg_list[i].stag = sgl[i].lkey;
2182 }
2183}
2184
2185/**
2186 * i40iw_post_send - post send work requests for a kernel application
2187 * @ibqp: qp ptr for wr
2188 * @ib_wr: work request ptr
2189 * @bad_wr: return of bad wr if err
2190 */
2191static int i40iw_post_send(struct ib_qp *ibqp,
2192 struct ib_send_wr *ib_wr,
2193 struct ib_send_wr **bad_wr)
2194{
2195 struct i40iw_qp *iwqp;
2196 struct i40iw_qp_uk *ukqp;
2197 struct i40iw_post_sq_info info;
2198 enum i40iw_status_code ret;
2199 int err = 0;
2200 unsigned long flags;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002201 bool inv_stag;
Faisal Latifd3749842016-01-20 13:40:09 -06002202
2203 iwqp = (struct i40iw_qp *)ibqp;
2204 ukqp = &iwqp->sc_qp.qp_uk;
2205
2206 spin_lock_irqsave(&iwqp->lock, flags);
2207 while (ib_wr) {
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002208 inv_stag = false;
Faisal Latifd3749842016-01-20 13:40:09 -06002209 memset(&info, 0, sizeof(info));
2210 info.wr_id = (u64)(ib_wr->wr_id);
2211 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2212 info.signaled = true;
2213 if (ib_wr->send_flags & IB_SEND_FENCE)
2214 info.read_fence = true;
2215
2216 switch (ib_wr->opcode) {
2217 case IB_WR_SEND:
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002218 /* fall-through */
2219 case IB_WR_SEND_WITH_INV:
2220 if (ib_wr->opcode == IB_WR_SEND) {
2221 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2222 info.op_type = I40IW_OP_TYPE_SEND_SOL;
2223 else
2224 info.op_type = I40IW_OP_TYPE_SEND;
2225 } else {
2226 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2227 info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2228 else
2229 info.op_type = I40IW_OP_TYPE_SEND_INV;
2230 }
Faisal Latifd3749842016-01-20 13:40:09 -06002231
2232 if (ib_wr->send_flags & IB_SEND_INLINE) {
2233 info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2234 info.op.inline_send.len = ib_wr->sg_list[0].length;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002235 ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
Faisal Latifd3749842016-01-20 13:40:09 -06002236 } else {
2237 info.op.send.num_sges = ib_wr->num_sge;
2238 info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002239 ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
Faisal Latifd3749842016-01-20 13:40:09 -06002240 }
2241
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002242 if (ret) {
2243 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2244 err = -ENOMEM;
2245 else
2246 err = -EINVAL;
2247 }
Faisal Latifd3749842016-01-20 13:40:09 -06002248 break;
2249 case IB_WR_RDMA_WRITE:
2250 info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2251
2252 if (ib_wr->send_flags & IB_SEND_INLINE) {
2253 info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2254 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2255 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2256 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2257 info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
2258 ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2259 } else {
2260 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2261 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2262 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2263 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2264 info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
2265 ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2266 }
2267
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002268 if (ret) {
2269 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2270 err = -ENOMEM;
2271 else
2272 err = -EINVAL;
2273 }
Faisal Latifd3749842016-01-20 13:40:09 -06002274 break;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002275 case IB_WR_RDMA_READ_WITH_INV:
2276 inv_stag = true;
2277 /* fall-through*/
Faisal Latifd3749842016-01-20 13:40:09 -06002278 case IB_WR_RDMA_READ:
Shiraz Saleem6c2f7612016-04-22 14:14:27 -05002279 if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2280 err = -EINVAL;
2281 break;
2282 }
Faisal Latifd3749842016-01-20 13:40:09 -06002283 info.op_type = I40IW_OP_TYPE_RDMA_READ;
2284 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2285 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2286 info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
2287 info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2288 info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2289 info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002290 ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002291 if (ret) {
2292 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2293 err = -ENOMEM;
2294 else
2295 err = -EINVAL;
2296 }
Faisal Latifd3749842016-01-20 13:40:09 -06002297 break;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002298 case IB_WR_LOCAL_INV:
2299 info.op_type = I40IW_OP_TYPE_INV_STAG;
2300 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2301 ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
2302 if (ret)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002303 err = -ENOMEM;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002304 break;
2305 case IB_WR_REG_MR:
2306 {
2307 struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002308 int flags = reg_wr(ib_wr)->access;
2309 struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2310 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2311 struct i40iw_fast_reg_stag_info info;
2312
Shiraz Saleem7748e492016-06-14 16:54:19 -05002313 memset(&info, 0, sizeof(info));
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002314 info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
2315 info.access_rights |= i40iw_get_user_access(flags);
2316 info.stag_key = reg_wr(ib_wr)->key & 0xff;
2317 info.stag_idx = reg_wr(ib_wr)->key >> 8;
Henry Oroscoe6779182016-11-09 21:33:32 -06002318 info.page_size = reg_wr(ib_wr)->mr->page_size;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002319 info.wr_id = ib_wr->wr_id;
2320
2321 info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
2322 info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2323 info.total_len = iwmr->ibmr.length;
Shiraz Saleem7748e492016-06-14 16:54:19 -05002324 info.reg_addr_pa = *(u64 *)palloc->level1.addr;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002325 info.first_pm_pbl_index = palloc->level1.idx;
2326 info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2327 info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
2328
Shiraz Saleem7748e492016-06-14 16:54:19 -05002329 if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
2330 info.chunk_size = 1;
2331
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002332 ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2333 if (ret)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002334 err = -ENOMEM;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002335 break;
2336 }
Faisal Latifd3749842016-01-20 13:40:09 -06002337 default:
2338 err = -EINVAL;
2339 i40iw_pr_err("post_send: bad opcode = 0x%x\n",
2340 ib_wr->opcode);
2341 break;
2342 }
2343
2344 if (err)
2345 break;
2346 ib_wr = ib_wr->next;
2347 }
2348
2349 if (err)
2350 *bad_wr = ib_wr;
2351 else
2352 ukqp->ops.iw_qp_post_wr(ukqp);
2353 spin_unlock_irqrestore(&iwqp->lock, flags);
2354
2355 return err;
2356}
2357
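/*
 * Illustrative sketch, assuming a kernel consumer with an already
 * registered, DMA-mapped buffer: a minimal signaled send posted through
 * the core ib_post_send(), which lands in i40iw_post_send() above. All
 * values are hypothetical.
 */
static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= 1,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
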
2358/**
2359 * i40iw_post_recv - post receive wr for kernel application
2360 * @ibqp: ib qp pointer
2361 * @ib_wr: work request for receive
2362 * @bad_wr: bad wr caused an error
2363 */
2364static int i40iw_post_recv(struct ib_qp *ibqp,
2365 struct ib_recv_wr *ib_wr,
2366 struct ib_recv_wr **bad_wr)
2367{
2368 struct i40iw_qp *iwqp;
2369 struct i40iw_qp_uk *ukqp;
2370 struct i40iw_post_rq_info post_recv;
2371 struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
2372 enum i40iw_status_code ret = 0;
2373 unsigned long flags;
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002374 int err = 0;
Faisal Latifd3749842016-01-20 13:40:09 -06002375
2376 iwqp = (struct i40iw_qp *)ibqp;
2377 ukqp = &iwqp->sc_qp.qp_uk;
2378
2379 memset(&post_recv, 0, sizeof(post_recv));
2380 spin_lock_irqsave(&iwqp->lock, flags);
2381 while (ib_wr) {
2382 post_recv.num_sges = ib_wr->num_sge;
2383 post_recv.wr_id = ib_wr->wr_id;
2384 i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2385 post_recv.sg_list = sg_list;
2386 ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
2387 if (ret) {
2388 i40iw_pr_err("post_recv: error %d\n", ret);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002389 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2390 err = -ENOMEM;
2391 else
2392 err = -EINVAL;
Faisal Latifd3749842016-01-20 13:40:09 -06002393 *bad_wr = ib_wr;
2394 goto out;
2395 }
2396 ib_wr = ib_wr->next;
2397 }
2398 out:
2399 spin_unlock_irqrestore(&iwqp->lock, flags);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002400 return err;
Faisal Latifd3749842016-01-20 13:40:09 -06002401}
2402
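/*
 * Illustrative sketch, matching the send example above: posting a single
 * receive buffer through the core ib_post_recv(). Values are hypothetical.
 */
static int example_post_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
	struct ib_recv_wr wr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1 };
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}
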
2403/**
2404 * i40iw_poll_cq - poll cq for completion (kernel apps)
2405 * @ibcq: cq to poll
2406 * @num_entries: number of entries to poll
2407 * @entry: array of work completions to fill
2408 */
2409static int i40iw_poll_cq(struct ib_cq *ibcq,
2410 int num_entries,
2411 struct ib_wc *entry)
2412{
2413 struct i40iw_cq *iwcq;
2414 int cqe_count = 0;
2415 struct i40iw_cq_poll_info cq_poll_info;
2416 enum i40iw_status_code ret;
2417 struct i40iw_cq_uk *ukcq;
2418 struct i40iw_sc_qp *qp;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002419 struct i40iw_qp *iwqp;
Faisal Latifd3749842016-01-20 13:40:09 -06002420 unsigned long flags;
2421
2422 iwcq = (struct i40iw_cq *)ibcq;
2423 ukcq = &iwcq->sc_cq.cq_uk;
2424
2425 spin_lock_irqsave(&iwcq->lock, flags);
2426 while (cqe_count < num_entries) {
Mustafa Ismailb54143b2016-07-12 11:48:42 -05002427 ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
Faisal Latifd3749842016-01-20 13:40:09 -06002428 if (ret == I40IW_ERR_QUEUE_EMPTY) {
2429 break;
Tatyana Nikolovaf8a4e762016-04-22 14:14:28 -05002430 } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
2431 continue;
Faisal Latifd3749842016-01-20 13:40:09 -06002432 } else if (ret) {
2433 if (!cqe_count)
2434 cqe_count = -1;
2435 break;
2436 }
2437 entry->wc_flags = 0;
2438 entry->wr_id = cq_poll_info.wr_id;
Ismail, Mustafadf356302016-04-18 10:33:00 -05002439 if (cq_poll_info.error) {
Faisal Latifd3749842016-01-20 13:40:09 -06002440 entry->status = IB_WC_WR_FLUSH_ERR;
Ismail, Mustafadf356302016-04-18 10:33:00 -05002441 entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
2442 } else {
2443 entry->status = IB_WC_SUCCESS;
2444 }
Faisal Latifd3749842016-01-20 13:40:09 -06002445
2446 switch (cq_poll_info.op_type) {
2447 case I40IW_OP_TYPE_RDMA_WRITE:
2448 entry->opcode = IB_WC_RDMA_WRITE;
2449 break;
2450 case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
2451 case I40IW_OP_TYPE_RDMA_READ:
2452 entry->opcode = IB_WC_RDMA_READ;
2453 break;
2454 case I40IW_OP_TYPE_SEND_SOL:
2455 case I40IW_OP_TYPE_SEND_SOL_INV:
2456 case I40IW_OP_TYPE_SEND_INV:
2457 case I40IW_OP_TYPE_SEND:
2458 entry->opcode = IB_WC_SEND;
2459 break;
2460 case I40IW_OP_TYPE_REC:
2461 entry->opcode = IB_WC_RECV;
2462 break;
2463 default:
2464 entry->opcode = IB_WC_RECV;
2465 break;
2466 }
2467
Faisal Latifd3749842016-01-20 13:40:09 -06002468 entry->ex.imm_data = 0;
2469 qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
2470 entry->qp = (struct ib_qp *)qp->back_qp;
2471 entry->src_qp = cq_poll_info.qp_id;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002472 iwqp = (struct i40iw_qp *)qp->back_qp;
2473 if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2474 if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
2475 complete(&iwqp->sq_drained);
2476 if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
2477 complete(&iwqp->rq_drained);
2478 }
Faisal Latifd3749842016-01-20 13:40:09 -06002479 entry->byte_len = cq_poll_info.bytes_xfered;
2480 entry++;
2481 cqe_count++;
2482 }
2483 spin_unlock_irqrestore(&iwcq->lock, flags);
2484 return cqe_count;
2485}
2486
2487/**
2488 * i40iw_req_notify_cq - arm cq for a kernel application
2489 * @ibcq: cq to arm
2490 * @notify_flags: notification flags
2491 */
2492static int i40iw_req_notify_cq(struct ib_cq *ibcq,
2493 enum ib_cq_notify_flags notify_flags)
2494{
2495 struct i40iw_cq *iwcq;
2496 struct i40iw_cq_uk *ukcq;
Shiraz Saleem747f1c62016-06-14 16:54:16 -05002497 unsigned long flags;
2498 enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
Faisal Latifd3749842016-01-20 13:40:09 -06002499
2500 iwcq = (struct i40iw_cq *)ibcq;
2501 ukcq = &iwcq->sc_cq.cq_uk;
Shiraz Saleem747f1c62016-06-14 16:54:16 -05002502 if (notify_flags == IB_CQ_SOLICITED)
2503 cq_notify = IW_CQ_COMPL_SOLICITED;
2504 spin_lock_irqsave(&iwcq->lock, flags);
Faisal Latifd3749842016-01-20 13:40:09 -06002505 ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
Shiraz Saleem747f1c62016-06-14 16:54:16 -05002506 spin_unlock_irqrestore(&iwcq->lock, flags);
Faisal Latifd3749842016-01-20 13:40:09 -06002507 return 0;
2508}
2509
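/*
 * Illustrative sketch: the usual consumer pattern combining the two entry
 * points above, typically from a CQ completion handler. Draining, arming,
 * then polling once more closes the race with CQEs that arrive between
 * the drain and the arm. "cq" is hypothetical.
 */
static void example_cq_handler(struct ib_cq *cq, void *ctx)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* real code dispatches on wc.status and wc.opcode */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* catch completions that raced with the re-arm */
}
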
2510/**
2511 * i40iw_port_immutable - return port's immutable data
2512 * @ibdev: ib dev struct
2513 * @port_num: port number
2514 * @immutable: immutable port data to return
2515 */
2516static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2517 struct ib_port_immutable *immutable)
2518{
2519 struct ib_port_attr attr;
2520 int err;
2521
Or Gerlitzc4550c62017-01-24 13:02:39 +02002522 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2523
2524 err = ib_query_port(ibdev, port_num, &attr);
Faisal Latifd3749842016-01-20 13:40:09 -06002525
2526 if (err)
2527 return err;
2528
2529 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2530 immutable->gid_tbl_len = attr.gid_tbl_len;
Faisal Latifd3749842016-01-20 13:40:09 -06002531
2532 return 0;
2533}
2534
Christoph Lameterb40f4752016-05-16 12:49:33 -05002535static const char * const i40iw_hw_stat_names[] = {
2536 /* 32-bit names */
2537 [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2538 [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2539 [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2540 [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2541 [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2542 [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2543 [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2544 [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2545 [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
2546 /* 64-bit names */
2547 [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2548 "ip4InOctets",
2549 [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2550 "ip4InPkts",
2551 [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2552 "ip4InReasmRqd",
2553 [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2554 "ip4InMcastPkts",
2555 [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2556 "ip4OutOctets",
2557 [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2558 "ip4OutPkts",
2559 [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2560 "ip4OutSegRqd",
2561 [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2562 "ip4OutMcastPkts",
2563 [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2564 "ip6InOctets",
2565 [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2566 "ip6InPkts",
2567 [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2568 "ip6InReasmRqd",
2569 [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2570 "ip6InMcastPkts",
2571 [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2572 "ip6OutOctets",
2573 [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2574 "ip6OutPkts",
2575 [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2576 "ip6OutSegRqd",
2577 [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2578 "ip6OutMcastPkts",
2579 [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
2580 "tcpInSegs",
2581 [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
2582 "tcpOutSegs",
2583 [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2584 "iwInRdmaReads",
2585 [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2586 "iwInRdmaSends",
2587 [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2588 "iwInRdmaWrites",
2589 [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2590 "iwOutRdmaReads",
2591 [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2592 "iwOutRdmaSends",
2593 [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2594 "iwOutRdmaWrites",
2595 [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
2596 "iwRdmaBnd",
2597 [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
2598 "iwRdmaInv"
2599};
2600
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002601static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
Ira Weinyf65c52c2016-06-15 02:21:59 -04002602{
2603 u32 firmware_version = I40IW_FW_VERSION;
2604
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002605 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
2606 (firmware_version & 0x000000ff));
Ira Weinyf65c52c2016-06-15 02:21:59 -04002607}
2608
Faisal Latifd3749842016-01-20 13:40:09 -06002609/**
Christoph Lameterb40f4752016-05-16 12:49:33 -05002610 * i40iw_alloc_hw_stats - Allocate a hw stats structure
2611 * @ibdev: device pointer from stack
2612 * @port_num: port number
Faisal Latifd3749842016-01-20 13:40:09 -06002613 */
Christoph Lameterb40f4752016-05-16 12:49:33 -05002614static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
2615 u8 port_num)
2616{
2617 struct i40iw_device *iwdev = to_iwdev(ibdev);
2618 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2619 int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
2620 I40IW_HW_STAT_INDEX_MAX_64;
2621 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2622
2623 BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
2624 (I40IW_HW_STAT_INDEX_MAX_32 +
2625 I40IW_HW_STAT_INDEX_MAX_64));
2626
2627 /*
2628 * PFs get the default update lifespan, but VFs only update once
2629 * per second
2630 */
2631 if (!dev->is_pf)
2632 lifespan = 1000;
2633 return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
2634 lifespan);
2635}
2636
2637/**
2638 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
2639 * @ibdev: device pointer from stack
2640 * @stats: stats pointer from stack
2641 * @port_num: port number
2642 * @index: which hw counter the stack is requesting we update
2643 */
2644static int i40iw_get_hw_stats(struct ib_device *ibdev,
2645 struct rdma_hw_stats *stats,
2646 u8 port_num, int index)
Faisal Latifd3749842016-01-20 13:40:09 -06002647{
2648 struct i40iw_device *iwdev = to_iwdev(ibdev);
2649 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06002650 struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
Faisal Latifd3749842016-01-20 13:40:09 -06002651 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
Faisal Latifd3749842016-01-20 13:40:09 -06002652
Faisal Latifd3749842016-01-20 13:40:09 -06002653 if (dev->is_pf) {
Henry Oroscod6f7bbc2016-12-06 16:16:20 -06002654 i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
Faisal Latifd3749842016-01-20 13:40:09 -06002655 } else {
Christoph Lameterb40f4752016-05-16 12:49:33 -05002656 if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
2657 return -ENOSYS;
Faisal Latifd3749842016-01-20 13:40:09 -06002658 }
2659
Shiraz Saleem91c42b72016-11-11 10:55:41 -06002660 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
Faisal Latifd3749842016-01-20 13:40:09 -06002661
Christoph Lameterb40f4752016-05-16 12:49:33 -05002662 return stats->num_counters;
Faisal Latifd3749842016-01-20 13:40:09 -06002663}
2664
2665/**
2666 * i40iw_query_gid - Query port GID
2667 * @ibdev: device pointer from stack
2668 * @port: port number
2669 * @index: Entry index
2670 * @gid: Global ID
2671 */
2672static int i40iw_query_gid(struct ib_device *ibdev,
2673 u8 port,
2674 int index,
2675 union ib_gid *gid)
2676{
2677 struct i40iw_device *iwdev = to_iwdev(ibdev);
2678
2679 memset(gid->raw, 0, sizeof(gid->raw));
2680 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
2681 return 0;
2682}
2683
2684/**
2685 * i40iw_modify_port - modify port properties
2686 * @ibdev: device pointer from stack
2687 * @port: port number
2688 * @port_modify_mask: mask for port modifications
2689 * @props: port properties
2690 */
2691static int i40iw_modify_port(struct ib_device *ibdev,
2692 u8 port,
2693 int port_modify_mask,
2694 struct ib_port_modify *props)
2695{
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002696 return -ENOSYS;
Faisal Latifd3749842016-01-20 13:40:09 -06002697}
2698
2699/**
2700 * i40iw_query_pkey - Query partition key
2701 * @ibdev: device pointer from stack
2702 * @port: port number
2703 * @index: index of pkey
2704 * @pkey: pointer to store the pkey
2705 */
2706static int i40iw_query_pkey(struct ib_device *ibdev,
2707 u8 port,
2708 u16 index,
2709 u16 *pkey)
2710{
2711 *pkey = 0;
2712 return 0;
2713}
2714
2715/**
2716 * i40iw_create_ah - create address handle
2717 * @ibpd: ptr of pd
2718 * @attr: address handle attributes
 * @udata: user data
2719 */
2720static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002721 struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002722 struct ib_udata *udata)
Faisal Latifd3749842016-01-20 13:40:09 -06002724{
2725 return ERR_PTR(-ENOSYS);
2726}
2727
2728/**
2729 * i40iw_destroy_ah - Destroy address handle
2730 * @ah: pointer to address handle
2731 */
2732static int i40iw_destroy_ah(struct ib_ah *ah)
2733{
2734 return -ENOSYS;
2735}
2736
2737/**
2738 * i40iw_init_rdma_device - initialization of iwarp device
2739 * @iwdev: iwarp device
2740 */
2741static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
2742{
2743 struct i40iw_ib_device *iwibdev;
2744 struct net_device *netdev = iwdev->netdev;
2745 struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
2746
2747 iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
2748 if (!iwibdev) {
2749 i40iw_pr_err("ib_alloc_device failed\n");
2750 return NULL;
2751 }
2752 strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
2753 iwibdev->ibdev.owner = THIS_MODULE;
2754 iwdev->iwibdev = iwibdev;
2755 iwibdev->iwdev = iwdev;
2756
2757 iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
2758 ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
2759
2760 iwibdev->ibdev.uverbs_cmd_mask =
2761 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2762 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2763 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2764 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2765 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2766 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2767 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2768 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2769 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2770 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2771 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2772 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2773 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2774 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2775 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2776 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2777 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2778 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2779 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2780 (1ull << IB_USER_VERBS_CMD_POST_SEND);
2781 iwibdev->ibdev.phys_port_cnt = 1;
Henry Oroscoe69c5092016-11-09 21:24:48 -06002782 iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
Faisal Latifd3749842016-01-20 13:40:09 -06002783 iwibdev->ibdev.dev.parent = &pcidev->dev;
2784 iwibdev->ibdev.query_port = i40iw_query_port;
2785 iwibdev->ibdev.modify_port = i40iw_modify_port;
2786 iwibdev->ibdev.query_pkey = i40iw_query_pkey;
2787 iwibdev->ibdev.query_gid = i40iw_query_gid;
2788 iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
2789 iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
2790 iwibdev->ibdev.mmap = i40iw_mmap;
2791 iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
2792 iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
2793 iwibdev->ibdev.create_qp = i40iw_create_qp;
2794 iwibdev->ibdev.modify_qp = i40iw_modify_qp;
2795 iwibdev->ibdev.query_qp = i40iw_query_qp;
2796 iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
2797 iwibdev->ibdev.create_cq = i40iw_create_cq;
2798 iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
2799 iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
2800 iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
2801 iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
Christoph Lameterb40f4752016-05-16 12:49:33 -05002802 iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
2803 iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
Faisal Latifd3749842016-01-20 13:40:09 -06002804 iwibdev->ibdev.query_device = i40iw_query_device;
2805 iwibdev->ibdev.create_ah = i40iw_create_ah;
2806 iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
Ismail, Mustafac2b75ef2016-04-18 10:33:09 -05002807 iwibdev->ibdev.drain_sq = i40iw_drain_sq;
2808 iwibdev->ibdev.drain_rq = i40iw_drain_rq;
Ismail, Mustafab7aee852016-04-18 10:33:06 -05002809 iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
2810 iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
Faisal Latifd3749842016-01-20 13:40:09 -06002811 iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
2812 if (!iwibdev->ibdev.iwcm) {
2813 ib_dealloc_device(&iwibdev->ibdev);
Faisal Latifd3749842016-01-20 13:40:09 -06002814 return NULL;
2815 }
2816
2817 iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
2818 iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
2819 iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
2820 iwibdev->ibdev.iwcm->connect = i40iw_connect;
2821 iwibdev->ibdev.iwcm->accept = i40iw_accept;
2822 iwibdev->ibdev.iwcm->reject = i40iw_reject;
2823 iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
2824 iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
Faisal Latif8d8cd0b2016-02-26 09:18:01 -06002825 memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
2826 sizeof(iwibdev->ibdev.iwcm->ifname));
Faisal Latifd3749842016-01-20 13:40:09 -06002827 iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
Ira Weinyf65c52c2016-06-15 02:21:59 -04002828 iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
Faisal Latifd3749842016-01-20 13:40:09 -06002829 iwibdev->ibdev.poll_cq = i40iw_poll_cq;
2830 iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
2831 iwibdev->ibdev.post_send = i40iw_post_send;
2832 iwibdev->ibdev.post_recv = i40iw_post_recv;
Faisal Latif8d8cd0b2016-02-26 09:18:01 -06002833
Faisal Latifd3749842016-01-20 13:40:09 -06002834 return iwibdev;
2835}
2836
2837/**
2838 * i40iw_port_ibevent - indicate port event
2839 * @iwdev: iwarp device
2840 */
2841void i40iw_port_ibevent(struct i40iw_device *iwdev)
2842{
2843 struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
2844 struct ib_event event;
2845
2846 event.device = &iwibdev->ibdev;
2847 event.element.port_num = 1;
2848 event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2849 ib_dispatch_event(&event);
2850}
2851
2852/**
2853 * i40iw_unregister_rdma_device - unregister of iwarp from IB
2854 * @iwibdev: rdma device ptr
2855 */
2856static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
2857{
2858 int i;
2859
2860 for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
2861 device_remove_file(&iwibdev->ibdev.dev,
2862 i40iw_dev_attributes[i]);
2863 ib_unregister_device(&iwibdev->ibdev);
2864}
2865
2866/**
2867 * i40iw_destroy_rdma_device - destroy rdma device and free resources
2868 * @iwibdev: IB device ptr
2869 */
2870void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
2871{
2872 if (!iwibdev)
2873 return;
2874
2875 i40iw_unregister_rdma_device(iwibdev);
2876 kfree(iwibdev->ibdev.iwcm);
2877 iwibdev->ibdev.iwcm = NULL;
Mustafa Ismaild5965932016-11-30 14:59:26 -06002878 wait_event_timeout(iwibdev->iwdev->close_wq,
2879 !atomic64_read(&iwibdev->iwdev->use_count),
2880 I40IW_EVENT_TIMEOUT);
Faisal Latifd3749842016-01-20 13:40:09 -06002881 ib_dealloc_device(&iwibdev->ibdev);
2882}
2883
2884/**
2885 * i40iw_register_rdma_device - register iwarp device to IB
2886 * @iwdev: iwarp device
2887 */
2888int i40iw_register_rdma_device(struct i40iw_device *iwdev)
2889{
2890 int i, ret;
2891 struct i40iw_ib_device *iwibdev;
2892
2893 iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
2894 if (!iwdev->iwibdev)
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002895 return -ENOMEM;
Faisal Latifd3749842016-01-20 13:40:09 -06002896 iwibdev = iwdev->iwibdev;
2897
2898 ret = ib_register_device(&iwibdev->ibdev, NULL);
2899 if (ret)
2900 goto error;
2901
2902 for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
2903 ret =
2904 device_create_file(&iwibdev->ibdev.dev,
2905 i40iw_dev_attributes[i]);
2906 if (ret) {
2907 while (i > 0) {
2908 i--;
2909 device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
2910 }
2911 ib_unregister_device(&iwibdev->ibdev);
2912 goto error;
2913 }
2914 }
2915 return 0;
2916error:
2917 kfree(iwdev->iwibdev->ibdev.iwcm);
2918 iwdev->iwibdev->ibdev.iwcm = NULL;
2919 ib_dealloc_device(&iwdev->iwibdev->ibdev);
Shiraz Saleemfe5d6e62016-07-12 11:48:39 -05002920 return ret;
Faisal Latifd3749842016-01-20 13:40:09 -06002921}