/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for *
 * RoCE (RDMA over Converged Ethernet) adapters. *
 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

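/* Return the SGID at @index from the device's software SGID table. */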
int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY |
					IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}

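/*
 * Map the physical link speed reported by firmware to IB speed and
 * width values; unknown speeds default to SDR/1X.
 */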
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
	    IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

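/*
 * ocrdma_add_mmap()/ocrdma_del_mmap()/ocrdma_search_mmap() track the
 * physical ranges a user context is allowed to mmap(); ocrdma_mmap()
 * validates incoming requests against this list.
 */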
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

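/*
 * When firmware pre-allocates protection domains (pd_prealloc_valid), PD
 * numbers are handed out from per-pool bitmaps: one pool for DPP-capable
 * PDs and one for normal PDs.
 */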
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			    bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

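/*
 * Allocate a PD. For user contexts on DPP-capable adapters a DPP PD is
 * attempted first; pre-allocated PDs are used when available, otherwise
 * the allocation goes through the mailbox and is retried without DPP on
 * failure.
 */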
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid) {
		status = ocrdma_get_pd_num(dev, pd);
		if (status == 0) {
			return pd;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd ? true : false);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status = 0;

	if (dev->pd_mgr->pd_prealloc_valid)
		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		status = ocrdma_mbx_dealloc_pd(dev, pd);

	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	(void)_ocrdma_dealloc_pd(dev, pd);
	return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

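/*
 * Create a user context: allocate the address-handle table shared with
 * user space, register its mapping, allocate the per-context PD and
 * return the device parameters to the user library.
 */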
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status = 0;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}

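/*
 * mmap() handler for user contexts: only ranges previously registered via
 * ocrdma_add_mmap() are allowed. Doorbell pages are mapped uncached, DPP
 * pages write-combined, everything else as normal memory.
 */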
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		status = _ocrdma_dealloc_pd(dev, pd);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

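/*
 * Memory region helpers: ocrdma_alloc_lkey() programs the access rights
 * and requests an lkey from firmware; the PBL routines below size and
 * build the page lists that back registered regions.
 */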
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

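/*
 * Walk the umem scatterlist and fill the page buffer entries (PBEs),
 * moving to the next PBL whenever the current one is full.
 */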
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(umem->page_size * pg_cnt));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
						      umem->page_size * pg_cnt));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt ==
				(mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}

		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = ib_umem_offset(mr->umem);
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->first_arm = true;

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

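/*
 * Tear down a CQ: detach it from the CQ table, quiesce its EQ interrupt,
 * acknowledge any outstanding CQEs, then release the hardware CQ and any
 * user mappings.
 */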
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	if (indx == -EINVAL)
		BUG();

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	(void)ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

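/*
 * Validate ib_qp_init_attr against the device limits before creating a QP;
 * the WQE-depth check is skipped for QP1 so the CM can use a depth of 128.
 */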
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id tables are managed in the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
1584 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1585 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1586 OCRDMA_QP_PARAMS_STATE_SHIFT;
Padmanabh Ratnakar43c706b2014-12-18 14:13:00 +05301587 qp_attr->qp_state = get_ibqp_state(qp_state);
1588 qp_attr->cur_qp_state = qp_attr->qp_state;
Parav Panditfe2caef2012-03-21 04:09:06 +05301589 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1590 qp_attr->max_dest_rd_atomic =
1591 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1592 qp_attr->max_rd_atomic =
1593 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1594 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1595 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
Padmanabh Ratnakar43c706b2014-12-18 14:13:00 +05301596 /* Sync driver QP state with FW */
1597 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05301598mbx_err:
1599 return status;
1600}
1601
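/*
 * The SRQ keeps a bitmap of free shadow wr_id slots in idx_bit_fields:
 * a set bit means the slot is free.  ocrdma_srq_get_idx() picks a free
 * slot and toggles its bit to mark it busy; the completion path toggles
 * it back.  Tags are carried 1-based in the CQE (index 0 is never used
 * on the wire), hence the +1/-1 adjustments around the toggles.
 */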
Rasmus Villemoesf3070e72015-01-16 15:39:56 +01001602static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
Parav Panditfe2caef2012-03-21 04:09:06 +05301603{
Rasmus Villemoesf3070e72015-01-16 15:39:56 +01001604 unsigned int i = idx / 32;
1605 u32 mask = (1U << (idx % 32));
Parav Panditfe2caef2012-03-21 04:09:06 +05301606
Rasmus Villemoesba64fdc2015-01-16 15:39:55 +01001607 srq->idx_bit_fields[i] ^= mask;
Parav Panditfe2caef2012-03-21 04:09:06 +05301608}
1609
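/*
 * Helpers for the hw work queue rings.  head is the sw producer index and
 * tail the consumer index, both advanced modulo max_wqe_idx.  A minimal
 * reading of the free-count arithmetic below, assuming
 * max_wqe_idx == max_cnt - 1, is that one slot is effectively kept unused
 * so a full ring can be told apart from an empty one.
 */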
1610static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1611{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301612 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
Parav Panditfe2caef2012-03-21 04:09:06 +05301613}
1614
1615static int is_hw_sq_empty(struct ocrdma_qp *qp)
1616{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301617 return (qp->sq.tail == qp->sq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301618}
1619
1620static int is_hw_rq_empty(struct ocrdma_qp *qp)
1621{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301622 return (qp->rq.tail == qp->rq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301623}
1624
1625static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1626{
1627 return q->va + (q->head * q->entry_size);
1628}
1629
1630static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1631 u32 idx)
1632{
1633 return q->va + (idx * q->entry_size);
1634}
1635
1636static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1637{
1638 q->head = (q->head + 1) & q->max_wqe_idx;
1639}
1640
1641static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1642{
1643 q->tail = (q->tail + 1) & q->max_wqe_idx;
1644}
1645
1646/* discard the cqe for a given QP */
1647static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1648{
1649 unsigned long cq_flags;
1650 unsigned long flags;
1651 int discard_cnt = 0;
1652 u32 cur_getp, stop_getp;
1653 struct ocrdma_cqe *cqe;
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301654 u32 qpn = 0, wqe_idx = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301655
1656 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1657
1658	/* traverse the CQEs in the hw CQ,
1659	 * find the CQEs that match the given qp and
1660	 * mark them discarded by clearing the qpn.
1661	 * the doorbell is rung in poll_cq() because
1662	 * CQEs are not completed out of order.
1663	 */
1664
1665 cur_getp = cq->getp;
1666	/* find up to where we reap the cq. */
1667 stop_getp = cur_getp;
1668 do {
1669 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1670 break;
1671
1672 cqe = cq->va + cur_getp;
1673	/* exit when (a) the whole hw cq has been reaped, or
1674	 * (b) the qp's sq/rq becomes empty;
1675	 * otherwise keep scanning.
1676	 */
1677 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1678 /* if previously discarded cqe found, skip that too. */
1679 /* check for matching qp */
1680 if (qpn == 0 || qpn != qp->id)
1681 goto skip_cqe;
1682
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301683 if (is_cqe_for_sq(cqe)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301684 ocrdma_hwq_inc_tail(&qp->sq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301685 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301686 if (qp->srq) {
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301687 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1688 OCRDMA_CQE_BUFTAG_SHIFT) &
1689 qp->srq->rq.max_wqe_idx;
1690 if (wqe_idx < 1)
1691 BUG();
Parav Panditfe2caef2012-03-21 04:09:06 +05301692 spin_lock_irqsave(&qp->srq->q_lock, flags);
1693 ocrdma_hwq_inc_tail(&qp->srq->rq);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301694 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301695 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1696
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301697 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301698 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301699 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301700 }
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301701 /* mark cqe discarded so that it is not picked up later
1702 * in the poll_cq().
1703 */
1704 discard_cnt += 1;
1705 cqe->cmn.qpn = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301706skip_cqe:
1707 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1708 } while (cur_getp != stop_getp);
1709 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1710}
1711
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301712void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
Parav Panditfe2caef2012-03-21 04:09:06 +05301713{
1714 int found = false;
1715 unsigned long flags;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301716 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301717 /* sync with any active CQ poll */
1718
1719 spin_lock_irqsave(&dev->flush_q_lock, flags);
1720 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1721 if (found)
1722 list_del(&qp->sq_entry);
1723 if (!qp->srq) {
1724 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1725 if (found)
1726 list_del(&qp->rq_entry);
1727 }
1728 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1729}
1730
1731int ocrdma_destroy_qp(struct ib_qp *ibqp)
1732{
Parav Panditfe2caef2012-03-21 04:09:06 +05301733 struct ocrdma_pd *pd;
1734 struct ocrdma_qp *qp;
1735 struct ocrdma_dev *dev;
1736 struct ib_qp_attr attrs;
Devesh Sharmafe488222015-05-19 11:32:34 +05301737 int attr_mask;
Dan Carpenterd19081e2012-05-02 09:14:47 +03001738 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05301739
1740 qp = get_ocrdma_qp(ibqp);
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301741 dev = get_ocrdma_dev(ibqp->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301742
Parav Panditfe2caef2012-03-21 04:09:06 +05301743 pd = qp->pd;
1744
1745 /* change the QP state to ERROR */
Devesh Sharmafe488222015-05-19 11:32:34 +05301746 if (qp->state != OCRDMA_QPS_RST) {
1747 attrs.qp_state = IB_QPS_ERR;
1748 attr_mask = IB_QP_STATE;
1749 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1750 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301751	/* ensure that CQEs for a newly created QP (whose id may be the same
1752	 * as that of the QP just being destroyed) don't get
1753	 * discarded until the old CQEs are discarded.
1754	 */
1755 mutex_lock(&dev->dev_lock);
Mitesh Ahuja4b8180a2014-12-18 14:13:01 +05301756 (void) ocrdma_mbx_destroy_qp(dev, qp);
Parav Panditfe2caef2012-03-21 04:09:06 +05301757
1758 /*
1759 * acquire CQ lock while destroy is in progress, in order to
1760	 * protect against processing in-flight CQEs for this QP.
1761 */
Dan Carpenterd19081e2012-05-02 09:14:47 +03001762 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301763 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001764 spin_lock(&qp->rq_cq->cq_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301765
1766 ocrdma_del_qpn_map(dev, qp);
1767
1768 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001769 spin_unlock(&qp->rq_cq->cq_lock);
1770 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301771
1772 if (!pd->uctx) {
1773 ocrdma_discard_cqes(qp, qp->sq_cq);
1774 ocrdma_discard_cqes(qp, qp->rq_cq);
1775 }
1776 mutex_unlock(&dev->dev_lock);
1777
1778 if (pd->uctx) {
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301779 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1780 PAGE_ALIGN(qp->sq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301781 if (!qp->srq)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301782 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1783 PAGE_ALIGN(qp->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301784 }
1785
1786 ocrdma_del_flush_qp(qp);
1787
Parav Panditfe2caef2012-03-21 04:09:06 +05301788 kfree(qp->wqe_wr_id_tbl);
1789 kfree(qp->rqe_wr_id_tbl);
1790 kfree(qp);
Mitesh Ahuja4b8180a2014-12-18 14:13:01 +05301791 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301792}
1793
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301794static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1795 struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301796{
1797 int status;
1798 struct ocrdma_create_srq_uresp uresp;
1799
Dan Carpenter63ea3742013-07-29 22:34:29 +03001800 memset(&uresp, 0, sizeof(uresp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301801 uresp.rq_dbid = srq->rq.dbid;
1802 uresp.num_rq_pages = 1;
Devesh Sharma1b76d382014-09-05 19:35:40 +05301803 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
Parav Panditfe2caef2012-03-21 04:09:06 +05301804 uresp.rq_page_size = srq->rq.len;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301805 uresp.db_page_addr = dev->nic_info.unmapped_db +
1806 (srq->pd->id * dev->nic_info.db_page_size);
1807 uresp.db_page_size = dev->nic_info.db_page_size;
Parav Panditfe2caef2012-03-21 04:09:06 +05301808 uresp.num_rqe_allocated = srq->rq.max_cnt;
Devesh Sharma21c33912014-02-04 11:56:56 +05301809 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301810 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
Parav Panditfe2caef2012-03-21 04:09:06 +05301811 uresp.db_shift = 24;
1812 } else {
1813 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1814 uresp.db_shift = 16;
1815 }
1816
1817 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1818 if (status)
1819 return status;
1820 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1821 uresp.rq_page_size);
1822 if (status)
1823 return status;
1824 return status;
1825}
1826
1827struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1828 struct ib_srq_init_attr *init_attr,
1829 struct ib_udata *udata)
1830{
1831 int status = -ENOMEM;
1832 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301833 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301834 struct ocrdma_srq *srq;
1835
1836 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1837 return ERR_PTR(-EINVAL);
1838 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1839 return ERR_PTR(-EINVAL);
1840
1841 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1842 if (!srq)
1843 return ERR_PTR(status);
1844
1845 spin_lock_init(&srq->q_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301846 srq->pd = pd;
1847 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301848 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
Parav Panditfe2caef2012-03-21 04:09:06 +05301849 if (status)
1850 goto err;
1851
1852 if (udata == NULL) {
1853 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1854 GFP_KERNEL);
1855 if (srq->rqe_wr_id_tbl == NULL)
1856 goto arm_err;
1857
1858 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1859 (srq->rq.max_cnt % 32 ? 1 : 0);
1860 srq->idx_bit_fields =
1861 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1862 if (srq->idx_bit_fields == NULL)
1863 goto arm_err;
1864 memset(srq->idx_bit_fields, 0xff,
1865 srq->bit_fields_len * sizeof(u32));
1866 }
1867
1868 if (init_attr->attr.srq_limit) {
1869 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1870 if (status)
1871 goto arm_err;
1872 }
1873
Parav Panditfe2caef2012-03-21 04:09:06 +05301874 if (udata) {
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301875 status = ocrdma_copy_srq_uresp(dev, srq, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301876 if (status)
1877 goto arm_err;
1878 }
1879
Parav Panditfe2caef2012-03-21 04:09:06 +05301880 return &srq->ibsrq;
1881
1882arm_err:
1883 ocrdma_mbx_destroy_srq(dev, srq);
1884err:
1885 kfree(srq->rqe_wr_id_tbl);
1886 kfree(srq->idx_bit_fields);
1887 kfree(srq);
1888 return ERR_PTR(status);
1889}
1890
1891int ocrdma_modify_srq(struct ib_srq *ibsrq,
1892 struct ib_srq_attr *srq_attr,
1893 enum ib_srq_attr_mask srq_attr_mask,
1894 struct ib_udata *udata)
1895{
1896 int status = 0;
1897 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301898
1899 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301900 if (srq_attr_mask & IB_SRQ_MAX_WR)
1901 status = -EINVAL;
1902 else
1903 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1904 return status;
1905}
1906
1907int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1908{
1909 int status;
1910 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301911
1912 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301913 status = ocrdma_mbx_query_srq(srq, srq_attr);
1914 return status;
1915}
1916
1917int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1918{
1919 int status;
1920 struct ocrdma_srq *srq;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301921 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301922
1923 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301924
1925 status = ocrdma_mbx_destroy_srq(dev, srq);
1926
1927 if (srq->pd->uctx)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301928 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1929 PAGE_ALIGN(srq->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301930
Parav Panditfe2caef2012-03-21 04:09:06 +05301931 kfree(srq->idx_bit_fields);
1932 kfree(srq->rqe_wr_id_tbl);
1933 kfree(srq);
1934 return status;
1935}
1936
1937/* unprivileged verbs and their support functions. */
1938static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1939 struct ocrdma_hdr_wqe *hdr,
1940 struct ib_send_wr *wr)
1941{
1942 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1943 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1944 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1945
1946 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1947 if (qp->qp_type == IB_QPT_GSI)
1948 ud_hdr->qkey = qp->qkey;
1949 else
1950 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1951 ud_hdr->rsvd_ahid = ah->id;
Devesh Sharma29565f22014-12-18 14:13:07 +05301952 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1953 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05301954}
1955
1956static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1957 struct ocrdma_sge *sge, int num_sge,
1958 struct ib_sge *sg_list)
1959{
1960 int i;
1961
1962 for (i = 0; i < num_sge; i++) {
1963 sge[i].lrkey = sg_list[i].lkey;
1964 sge[i].addr_lo = sg_list[i].addr;
1965 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1966 sge[i].len = sg_list[i].length;
1967 hdr->total_len += sg_list[i].length;
1968 }
1969 if (num_sge == 0)
1970 memset(sge, 0, sizeof(*sge));
1971}
1972
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301973static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1974{
1975 uint32_t total_len = 0, i;
1976
1977 for (i = 0; i < num_sge; i++)
1978 total_len += sg_list[i].length;
1979 return total_len;
1980}
1981
1982
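/*
 * For IB_SEND_INLINE on non-UD QPs the payload is copied directly into
 * the WQE after the header instead of being referenced through SGEs, and
 * the WQE type is set to INLINE; otherwise normal lkey SGEs are built.
 * In both cases the final WQE size (in strides) is encoded into cw.
 */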
Parav Panditfe2caef2012-03-21 04:09:06 +05301983static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1984 struct ocrdma_hdr_wqe *hdr,
1985 struct ocrdma_sge *sge,
1986 struct ib_send_wr *wr, u32 wqe_size)
1987{
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301988 int i;
1989 char *dpp_addr;
1990
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301991 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301992 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1993 if (unlikely(hdr->total_len > qp->max_inline_data)) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001994 pr_err("%s() supported_len=0x%x,\n"
Masanari Iida1a84db52014-08-29 23:37:33 +09001995 " unsupported len req=0x%x\n", __func__,
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301996 qp->max_inline_data, hdr->total_len);
Parav Panditfe2caef2012-03-21 04:09:06 +05301997 return -EINVAL;
1998 }
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301999 dpp_addr = (char *)sge;
2000 for (i = 0; i < wr->num_sge; i++) {
2001 memcpy(dpp_addr,
2002 (void *)(unsigned long)wr->sg_list[i].addr,
2003 wr->sg_list[i].length);
2004 dpp_addr += wr->sg_list[i].length;
2005 }
2006
Parav Panditfe2caef2012-03-21 04:09:06 +05302007 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05302008 if (0 == hdr->total_len)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302009 wqe_size += sizeof(struct ocrdma_sge);
Parav Panditfe2caef2012-03-21 04:09:06 +05302010 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2011 } else {
2012 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2013 if (wr->num_sge)
2014 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2015 else
2016 wqe_size += sizeof(struct ocrdma_sge);
2017 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2018 }
2019 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2020 return 0;
2021}
2022
2023static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2024 struct ib_send_wr *wr)
2025{
2026 int status;
2027 struct ocrdma_sge *sge;
2028 u32 wqe_size = sizeof(*hdr);
2029
2030 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2031 ocrdma_build_ud_hdr(qp, hdr, wr);
2032 sge = (struct ocrdma_sge *)(hdr + 2);
2033 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302034 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302035 sge = (struct ocrdma_sge *)(hdr + 1);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302036 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302037
2038 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2039 return status;
2040}
2041
2042static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2043 struct ib_send_wr *wr)
2044{
2045 int status;
2046 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2047 struct ocrdma_sge *sge = ext_rw + 1;
2048 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2049
2050 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2051 if (status)
2052 return status;
2053 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2054 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2055 ext_rw->lrkey = wr->wr.rdma.rkey;
2056 ext_rw->len = hdr->total_len;
2057 return 0;
2058}
2059
2060static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2061 struct ib_send_wr *wr)
2062{
2063 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2064 struct ocrdma_sge *sge = ext_rw + 1;
2065 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2066 sizeof(struct ocrdma_hdr_wqe);
2067
2068 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2069 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2070 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2071 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2072
2073 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2074 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2075 ext_rw->lrkey = wr->wr.rdma.rkey;
2076 ext_rw->len = hdr->total_len;
2077}
2078
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302079static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2080 struct ocrdma_hw_mr *hwmr)
2081{
2082 int i;
2083 u64 buf_addr = 0;
2084 int num_pbes;
2085 struct ocrdma_pbe *pbe;
2086
2087 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2088 num_pbes = 0;
2089
2090 /* go through the OS phy regions & fill hw pbe entries into pbls. */
2091 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
2092		/* one OS buffer may need more than one pbe when
2093		 * buffers are of different sizes;
2094		 * split the ib_buf into one or more pbes.
2095		 */
2096 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2097 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2098 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2099 num_pbes += 1;
2100 pbe++;
2101
2102		/* if the pbl is full of stored pbes,
2103		 * move to the next pbl.
2104		 */
2105 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2106 pbl_tbl++;
2107 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2108 }
2109 }
2110 return;
2111}
2112
2113static int get_encoded_page_size(int pg_sz)
2114{
2115	/* Max size is 256M (4096 << 16); return the power-of-two exponent relative to 4K */
2116 int i = 0;
2117 for (; i < 17; i++)
2118 if (pg_sz == (4096 << i))
2119 break;
2120 return i;
2121}
2122
2123
2124static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2125 struct ib_send_wr *wr)
2126{
2127 u64 fbo;
2128 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2129 struct ocrdma_mr *mr;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302130 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302131 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2132
2133 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2134
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302135 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302136 return -EINVAL;
2137
2138 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2139 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2140
2141 if (wr->wr.fast_reg.page_list_len == 0)
2142 BUG();
2143 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2144 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2145 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2146 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2147 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2148 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2149 hdr->lkey = wr->wr.fast_reg.rkey;
2150 hdr->total_len = wr->wr.fast_reg.length;
2151
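	/* fbo is the byte offset of iova_start within the first (page-aligned)
	 * buffer of the page list.
	 */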
2152 fbo = wr->wr.fast_reg.iova_start -
2153 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2154
2155 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2156 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2157 fast_reg->fbo_hi = upper_32_bits(fbo);
2158 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2159 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2160 fast_reg->size_sge =
2161 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
Roland Dreier7a1e89d2014-03-17 23:14:17 -07002162 mr = (struct ocrdma_mr *) (unsigned long)
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302163 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302164 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2165 return 0;
2166}
2167
Parav Panditfe2caef2012-03-21 04:09:06 +05302168static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2169{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302170 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302171
2172 iowrite32(val, qp->sq_db);
2173}
2174
2175int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2176 struct ib_send_wr **bad_wr)
2177{
2178 int status = 0;
2179 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2180 struct ocrdma_hdr_wqe *hdr;
2181 unsigned long flags;
2182
2183 spin_lock_irqsave(&qp->q_lock, flags);
2184 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2185 spin_unlock_irqrestore(&qp->q_lock, flags);
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002186 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302187 return -EINVAL;
2188 }
2189
2190 while (wr) {
Mitesh Ahujaf252b5d2014-06-10 19:32:20 +05302191 if (qp->qp_type == IB_QPT_UD &&
2192 (wr->opcode != IB_WR_SEND &&
2193 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2194 *bad_wr = wr;
2195 status = -EINVAL;
2196 break;
2197 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302198 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2199 wr->num_sge > qp->sq.max_sges) {
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002200 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302201 status = -ENOMEM;
2202 break;
2203 }
2204 hdr = ocrdma_hwq_head(&qp->sq);
2205 hdr->cw = 0;
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302206 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302207 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2208 if (wr->send_flags & IB_SEND_FENCE)
2209 hdr->cw |=
2210 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2211 if (wr->send_flags & IB_SEND_SOLICITED)
2212 hdr->cw |=
2213 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2214 hdr->total_len = 0;
2215 switch (wr->opcode) {
2216 case IB_WR_SEND_WITH_IMM:
2217 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2218 hdr->immdt = ntohl(wr->ex.imm_data);
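			/* fall through */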
2219 case IB_WR_SEND:
2220 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2221			status = ocrdma_build_send(qp, hdr, wr);
2222 break;
2223 case IB_WR_SEND_WITH_INV:
2224 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2225 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2226 hdr->lkey = wr->ex.invalidate_rkey;
2227 status = ocrdma_build_send(qp, hdr, wr);
2228 break;
2229 case IB_WR_RDMA_WRITE_WITH_IMM:
2230 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2231 hdr->immdt = ntohl(wr->ex.imm_data);
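			/* fall through */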
2232 case IB_WR_RDMA_WRITE:
2233 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2234 status = ocrdma_build_write(qp, hdr, wr);
2235 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302236 case IB_WR_RDMA_READ:
2237 ocrdma_build_read(qp, hdr, wr);
2238 break;
2239 case IB_WR_LOCAL_INV:
2240 hdr->cw |=
2241 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302242 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2243 sizeof(struct ocrdma_sge)) /
Parav Panditfe2caef2012-03-21 04:09:06 +05302244 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2245 hdr->lkey = wr->ex.invalidate_rkey;
2246 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302247 case IB_WR_FAST_REG_MR:
2248 status = ocrdma_build_fr(qp, hdr, wr);
2249 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302250 default:
2251 status = -EINVAL;
2252 break;
2253 }
2254 if (status) {
2255 *bad_wr = wr;
2256 break;
2257 }
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302258 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302259 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2260 else
2261 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2262 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2263 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2264 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2265 /* make sure wqe is written before adapter can access it */
2266 wmb();
2267 /* inform hw to start processing it */
2268 ocrdma_ring_sq_db(qp);
2269
2270 /* update pointer, counter for next wr */
2271 ocrdma_hwq_inc_head(&qp->sq);
2272 wr = wr->next;
2273 }
2274 spin_unlock_irqrestore(&qp->q_lock, flags);
2275 return status;
2276}
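/*
 * Example (illustrative only): a kernel consumer does not call
 * ocrdma_post_send() directly; it goes through the ib_verbs entry point,
 * e.g. for a signalled RDMA WRITE roughly:
 *
 *	struct ib_sge sge = { .addr = src_dma, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_send_wr wr = {
 *		.wr_id      = cookie,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.wr.rdma.remote_addr = remote_va;
 *	wr.wr.rdma.rkey        = remote_rkey;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * src_dma, len, mr, cookie, remote_va and remote_rkey are placeholders
 * for the caller's own registered buffers and bookkeeping.
 */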
2277
2278static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2279{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302280 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302281
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302282 iowrite32(val, qp->rq_db);
Parav Panditfe2caef2012-03-21 04:09:06 +05302283}
2284
2285static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2286 u16 tag)
2287{
2288 u32 wqe_size = 0;
2289 struct ocrdma_sge *sge;
2290 if (wr->num_sge)
2291 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2292 else
2293 wqe_size = sizeof(*sge) + sizeof(*rqe);
2294
2295 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2296 OCRDMA_WQE_SIZE_SHIFT);
2297 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2298 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2299 rqe->total_len = 0;
2300 rqe->rsvd_tag = tag;
2301 sge = (struct ocrdma_sge *)(rqe + 1);
2302 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2303 ocrdma_cpu_to_le32(rqe, wqe_size);
2304}
2305
2306int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2307 struct ib_recv_wr **bad_wr)
2308{
2309 int status = 0;
2310 unsigned long flags;
2311 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2312 struct ocrdma_hdr_wqe *rqe;
2313
2314 spin_lock_irqsave(&qp->q_lock, flags);
2315 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2316 spin_unlock_irqrestore(&qp->q_lock, flags);
2317 *bad_wr = wr;
2318 return -EINVAL;
2319 }
2320 while (wr) {
2321 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2322 wr->num_sge > qp->rq.max_sges) {
2323 *bad_wr = wr;
2324 status = -ENOMEM;
2325 break;
2326 }
2327 rqe = ocrdma_hwq_head(&qp->rq);
2328 ocrdma_build_rqe(rqe, wr, 0);
2329
2330 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2331 /* make sure rqe is written before adapter can access it */
2332 wmb();
2333
2334 /* inform hw to start processing it */
2335 ocrdma_ring_rq_db(qp);
2336
2337 /* update pointer, counter for next wr */
2338 ocrdma_hwq_inc_head(&qp->rq);
2339 wr = wr->next;
2340 }
2341 spin_unlock_irqrestore(&qp->q_lock, flags);
2342 return status;
2343}
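/*
 * Example (illustrative only): receives are likewise posted through the
 * ib_verbs entry point rather than by calling ocrdma_post_recv() directly,
 * roughly:
 *
 *	struct ib_sge sge = { .addr = buf_dma, .length = buf_len,
 *			      .lkey = mr->lkey };
 *	struct ib_recv_wr wr = { .wr_id = cookie, .sg_list = &sge,
 *				 .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 *
 * buf_dma, buf_len, mr and cookie are placeholders for the caller's own
 * registered buffer and bookkeeping.
 */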
2344
2345/* cqe for srq's rqe can potentially arrive out of order.
2346 * index gives the entry in the shadow table where to store
2347 * the wr_id. tag/index is returned in cqe to reference back
2348 * for a given rqe.
2349 */
2350static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2351{
2352 int row = 0;
2353 int indx = 0;
2354
2355 for (row = 0; row < srq->bit_fields_len; row++) {
2356 if (srq->idx_bit_fields[row]) {
2357 indx = ffs(srq->idx_bit_fields[row]);
2358 indx = (row * 32) + (indx - 1);
2359 if (indx >= srq->rq.max_cnt)
2360 BUG();
2361 ocrdma_srq_toggle_bit(srq, indx);
2362 break;
2363 }
2364 }
2365
2366 if (row == srq->bit_fields_len)
2367 BUG();
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302368 return indx + 1; /* Use from index 1 */
Parav Panditfe2caef2012-03-21 04:09:06 +05302369}
2370
2371static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2372{
2373 u32 val = srq->rq.dbid | (1 << 16);
2374
2375 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2376}
2377
2378int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2379 struct ib_recv_wr **bad_wr)
2380{
2381 int status = 0;
2382 unsigned long flags;
2383 struct ocrdma_srq *srq;
2384 struct ocrdma_hdr_wqe *rqe;
2385 u16 tag;
2386
2387 srq = get_ocrdma_srq(ibsrq);
2388
2389 spin_lock_irqsave(&srq->q_lock, flags);
2390 while (wr) {
2391 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2392 wr->num_sge > srq->rq.max_sges) {
2393 status = -ENOMEM;
2394 *bad_wr = wr;
2395 break;
2396 }
2397 tag = ocrdma_srq_get_idx(srq);
2398 rqe = ocrdma_hwq_head(&srq->rq);
2399 ocrdma_build_rqe(rqe, wr, tag);
2400
2401 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2402 /* make sure rqe is written before adapter can perform DMA */
2403 wmb();
2404 /* inform hw to start processing it */
2405 ocrdma_ring_srq_db(srq);
2406 /* update pointer, counter for next wr */
2407 ocrdma_hwq_inc_head(&srq->rq);
2408 wr = wr->next;
2409 }
2410 spin_unlock_irqrestore(&srq->q_lock, flags);
2411 return status;
2412}
2413
2414static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2415{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302416 enum ib_wc_status ibwc_status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302417
2418 switch (status) {
2419 case OCRDMA_CQE_GENERAL_ERR:
2420 ibwc_status = IB_WC_GENERAL_ERR;
2421 break;
2422 case OCRDMA_CQE_LOC_LEN_ERR:
2423 ibwc_status = IB_WC_LOC_LEN_ERR;
2424 break;
2425 case OCRDMA_CQE_LOC_QP_OP_ERR:
2426 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2427 break;
2428 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2429 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2430 break;
2431 case OCRDMA_CQE_LOC_PROT_ERR:
2432 ibwc_status = IB_WC_LOC_PROT_ERR;
2433 break;
2434 case OCRDMA_CQE_WR_FLUSH_ERR:
2435 ibwc_status = IB_WC_WR_FLUSH_ERR;
2436 break;
2437 case OCRDMA_CQE_MW_BIND_ERR:
2438 ibwc_status = IB_WC_MW_BIND_ERR;
2439 break;
2440 case OCRDMA_CQE_BAD_RESP_ERR:
2441 ibwc_status = IB_WC_BAD_RESP_ERR;
2442 break;
2443 case OCRDMA_CQE_LOC_ACCESS_ERR:
2444 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2445 break;
2446 case OCRDMA_CQE_REM_INV_REQ_ERR:
2447 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2448 break;
2449 case OCRDMA_CQE_REM_ACCESS_ERR:
2450 ibwc_status = IB_WC_REM_ACCESS_ERR;
2451 break;
2452 case OCRDMA_CQE_REM_OP_ERR:
2453 ibwc_status = IB_WC_REM_OP_ERR;
2454 break;
2455 case OCRDMA_CQE_RETRY_EXC_ERR:
2456 ibwc_status = IB_WC_RETRY_EXC_ERR;
2457 break;
2458 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2459 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2460 break;
2461 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2462 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2463 break;
2464 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2465 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2466 break;
2467 case OCRDMA_CQE_REM_ABORT_ERR:
2468 ibwc_status = IB_WC_REM_ABORT_ERR;
2469 break;
2470 case OCRDMA_CQE_INV_EECN_ERR:
2471 ibwc_status = IB_WC_INV_EECN_ERR;
2472 break;
2473 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2474 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2475 break;
2476 case OCRDMA_CQE_FATAL_ERR:
2477 ibwc_status = IB_WC_FATAL_ERR;
2478 break;
2479 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2480 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2481 break;
2482 default:
2483 ibwc_status = IB_WC_GENERAL_ERR;
2484 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002485 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302486 return ibwc_status;
2487}
2488
2489static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2490 u32 wqe_idx)
2491{
2492 struct ocrdma_hdr_wqe *hdr;
2493 struct ocrdma_sge *rw;
2494 int opcode;
2495
2496 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2497
2498 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2499 /* Undo the hdr->cw swap */
2500 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2501 switch (opcode) {
2502 case OCRDMA_WRITE:
2503 ibwc->opcode = IB_WC_RDMA_WRITE;
2504 break;
2505 case OCRDMA_READ:
2506 rw = (struct ocrdma_sge *)(hdr + 1);
2507 ibwc->opcode = IB_WC_RDMA_READ;
2508 ibwc->byte_len = rw->len;
2509 break;
2510 case OCRDMA_SEND:
2511 ibwc->opcode = IB_WC_SEND;
2512 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302513 case OCRDMA_FR_MR:
2514 ibwc->opcode = IB_WC_FAST_REG_MR;
2515 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302516 case OCRDMA_LKEY_INV:
2517 ibwc->opcode = IB_WC_LOCAL_INV;
2518 break;
2519 default:
2520 ibwc->status = IB_WC_GENERAL_ERR;
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002521 pr_err("%s() invalid opcode received = 0x%x\n",
2522 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
Parav Panditfe2caef2012-03-21 04:09:06 +05302523 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002524 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302525}
2526
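/*
 * Rewrite the status field of a CQE that is going to be replayed as a
 * flush completion: only the status bits are replaced with
 * OCRDMA_CQE_WR_FLUSH_ERR (UD CQEs keep their status in a different
 * bit-field); the rest of the CQE is preserved.
 */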
2527static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2528 struct ocrdma_cqe *cqe)
2529{
2530 if (is_cqe_for_sq(cqe)) {
2531 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2532 cqe->flags_status_srcqpn) &
2533 ~OCRDMA_CQE_STATUS_MASK);
2534 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2535 cqe->flags_status_srcqpn) |
2536 (OCRDMA_CQE_WR_FLUSH_ERR <<
2537 OCRDMA_CQE_STATUS_SHIFT));
2538 } else {
2539 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2540 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2541 cqe->flags_status_srcqpn) &
2542 ~OCRDMA_CQE_UD_STATUS_MASK);
2543 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2544 cqe->flags_status_srcqpn) |
2545 (OCRDMA_CQE_WR_FLUSH_ERR <<
2546 OCRDMA_CQE_UD_STATUS_SHIFT));
2547 } else {
2548 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2549 cqe->flags_status_srcqpn) &
2550 ~OCRDMA_CQE_STATUS_MASK);
2551 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2552 cqe->flags_status_srcqpn) |
2553 (OCRDMA_CQE_WR_FLUSH_ERR <<
2554 OCRDMA_CQE_STATUS_SHIFT));
2555 }
2556 }
2557}
2558
2559static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2560 struct ocrdma_qp *qp, int status)
2561{
2562 bool expand = false;
2563
2564 ibwc->byte_len = 0;
2565 ibwc->qp = &qp->ibqp;
2566 ibwc->status = ocrdma_to_ibwc_err(status);
2567
2568 ocrdma_flush_qp(qp);
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302569 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05302570
2571	/* if wqes/rqes are still pending for which cqes need to be returned,
2572	 * trigger expanding (inflating) this cqe into flush completions.
2573	 */
2574 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2575 expand = true;
2576 ocrdma_set_cqe_status_flushed(qp, cqe);
2577 }
2578 return expand;
2579}
2580
2581static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2582 struct ocrdma_qp *qp, int status)
2583{
2584 ibwc->opcode = IB_WC_RECV;
2585 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2586 ocrdma_hwq_inc_tail(&qp->rq);
2587
2588 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2589}
2590
2591static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2592 struct ocrdma_qp *qp, int status)
2593{
2594 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2595 ocrdma_hwq_inc_tail(&qp->sq);
2596
2597 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2598}
2599
2600
2601static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2602 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2603 bool *polled, bool *stop)
2604{
2605 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302606 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302607 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2608 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302609 if (status < OCRDMA_MAX_CQE_ERR)
2610 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302611
2612	/* when the hw sq is empty but the rq is not, keep the cqe
2613	 * so that the cq event is raised again.
2614	 */
2615 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2616 /* when cq for rq and sq is same, it is safe to return
2617 * flush cqe for RQEs.
2618 */
2619 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2620 *polled = true;
2621 status = OCRDMA_CQE_WR_FLUSH_ERR;
2622 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2623 } else {
2624			/* stop processing further cqes, as this cqe is used for
2625			 * triggering the cq event on the buddy cq of the RQ.
2626			 * When the QP is destroyed, this cqe will be removed
2627			 * from the cq's hardware q.
2628			 */
2629 *polled = false;
2630 *stop = true;
2631 expand = false;
2632 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302633 } else if (is_hw_sq_empty(qp)) {
2634 /* Do nothing */
2635 expand = false;
2636 *polled = false;
2637 *stop = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302638 } else {
2639 *polled = true;
2640 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2641 }
2642 return expand;
2643}
2644
2645static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2646 struct ocrdma_cqe *cqe,
2647 struct ib_wc *ibwc, bool *polled)
2648{
2649 bool expand = false;
2650 int tail = qp->sq.tail;
2651 u32 wqe_idx;
2652
2653 if (!qp->wqe_wr_id_tbl[tail].signaled) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302654 *polled = false; /* WC cannot be consumed yet */
2655 } else {
2656 ibwc->status = IB_WC_SUCCESS;
2657 ibwc->wc_flags = 0;
2658 ibwc->qp = &qp->ibqp;
2659 ocrdma_update_wc(qp, ibwc, tail);
2660 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302661 }
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302662 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2663 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
Parav Panditae3bca92012-08-17 14:45:33 +00002664 if (tail != wqe_idx)
2665 expand = true; /* Coalesced CQE can't be consumed yet */
2666
Parav Panditfe2caef2012-03-21 04:09:06 +05302667 ocrdma_hwq_inc_tail(&qp->sq);
2668 return expand;
2669}
2670
2671static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2672 struct ib_wc *ibwc, bool *polled, bool *stop)
2673{
2674 int status;
2675 bool expand;
2676
2677 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2678 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2679
2680 if (status == OCRDMA_CQE_SUCCESS)
2681 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2682 else
2683 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2684 return expand;
2685}
2686
2687static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2688{
2689 int status;
2690
2691 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2692 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2693 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2694 OCRDMA_CQE_SRCQP_MASK;
2695 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2696 OCRDMA_CQE_PKEY_MASK;
2697 ibwc->wc_flags = IB_WC_GRH;
2698 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2699 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2700 return status;
2701}
2702
2703static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2704 struct ocrdma_cqe *cqe,
2705 struct ocrdma_qp *qp)
2706{
2707 unsigned long flags;
2708 struct ocrdma_srq *srq;
2709 u32 wqe_idx;
2710
2711 srq = get_ocrdma_srq(qp->ibqp.srq);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302712 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302713 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2714 if (wqe_idx < 1)
2715 BUG();
2716
Parav Panditfe2caef2012-03-21 04:09:06 +05302717 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2718 spin_lock_irqsave(&srq->q_lock, flags);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302719 ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05302720 spin_unlock_irqrestore(&srq->q_lock, flags);
2721 ocrdma_hwq_inc_tail(&srq->rq);
2722}
2723
2724static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2725 struct ib_wc *ibwc, bool *polled, bool *stop,
2726 int status)
2727{
2728 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302729 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2730
2731 if (status < OCRDMA_MAX_CQE_ERR)
2732 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302733
2734	/* when the hw rq is empty but the sq is not, keep the cqe
2735	 * so that the cq event is raised again.
2736	 */
2737 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2738 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2739 *polled = true;
2740 status = OCRDMA_CQE_WR_FLUSH_ERR;
2741 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2742 } else {
2743 *polled = false;
2744 *stop = true;
2745 expand = false;
2746 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302747 } else if (is_hw_rq_empty(qp)) {
2748 /* Do nothing */
2749 expand = false;
2750 *polled = false;
2751 *stop = false;
Parav Pandita3698a92012-06-11 16:39:20 +05302752 } else {
2753 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302754 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
Parav Pandita3698a92012-06-11 16:39:20 +05302755 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302756 return expand;
2757}
2758
2759static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2760 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2761{
2762 ibwc->opcode = IB_WC_RECV;
2763 ibwc->qp = &qp->ibqp;
2764 ibwc->status = IB_WC_SUCCESS;
2765
2766 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2767 ocrdma_update_ud_rcqe(ibwc, cqe);
2768 else
2769 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2770
2771 if (is_cqe_imm(cqe)) {
2772 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2773 ibwc->wc_flags |= IB_WC_WITH_IMM;
2774 } else if (is_cqe_wr_imm(cqe)) {
2775 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2776 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2777 ibwc->wc_flags |= IB_WC_WITH_IMM;
2778 } else if (is_cqe_invalidated(cqe)) {
2779 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2780 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2781 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302782 if (qp->ibqp.srq) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302783 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302784 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302785 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2786 ocrdma_hwq_inc_tail(&qp->rq);
2787 }
2788}
2789
2790static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2791 struct ib_wc *ibwc, bool *polled, bool *stop)
2792{
2793 int status;
2794 bool expand = false;
2795
2796 ibwc->wc_flags = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302797 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302798 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2799 OCRDMA_CQE_UD_STATUS_MASK) >>
2800 OCRDMA_CQE_UD_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302801 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302802 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2803 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302804 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302805
2806 if (status == OCRDMA_CQE_SUCCESS) {
2807 *polled = true;
2808 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2809 } else {
2810 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2811 status);
2812 }
2813 return expand;
2814}
2815
2816static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2817 u16 cur_getp)
2818{
2819 if (cq->phase_change) {
2820 if (cur_getp == 0)
2821 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302822 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302823 /* clear valid bit */
2824 cqe->flags_status_srcqpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302825 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302826}
2827
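/*
 * Harvest up to num_entries completions from the hw CQ into ibwc[].
 * A single hw CQE may expand into several work completions (for example a
 * coalesced SQ CQE, or a flush CQE replayed for every pending WQE/RQE);
 * in that case the getp index is not advanced and the same CQE is polled
 * again ("expand_cqe").  Discarded CQEs (qpn == 0) are skipped.  On exit
 * the CQ doorbell is rung, arming the CQ only if a deferred arm was
 * requested via ocrdma_arm_cq().
 */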
2828static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2829 struct ib_wc *ibwc)
2830{
2831 u16 qpn = 0;
2832 int i = 0;
2833 bool expand = false;
2834 int polled_hw_cqes = 0;
2835 struct ocrdma_qp *qp = NULL;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302836 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302837 struct ocrdma_cqe *cqe;
2838 u16 cur_getp; bool polled = false; bool stop = false;
2839
2840 cur_getp = cq->getp;
2841 while (num_entries) {
2842 cqe = cq->va + cur_getp;
2843 /* check whether valid cqe or not */
2844 if (!is_cqe_valid(cq, cqe))
2845 break;
2846 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2847 /* ignore discarded cqe */
2848 if (qpn == 0)
2849 goto skip_cqe;
2850 qp = dev->qp_tbl[qpn];
2851 BUG_ON(qp == NULL);
2852
2853 if (is_cqe_for_sq(cqe)) {
2854 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2855 &stop);
2856 } else {
2857 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2858 &stop);
2859 }
2860 if (expand)
2861 goto expand_cqe;
2862 if (stop)
2863 goto stop_cqe;
2864 /* clear qpn to avoid duplicate processing by discard_cqe() */
2865 cqe->cmn.qpn = 0;
2866skip_cqe:
2867 polled_hw_cqes += 1;
2868 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2869 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2870expand_cqe:
2871 if (polled) {
2872 num_entries -= 1;
2873 i += 1;
2874 ibwc = ibwc + 1;
2875 polled = false;
2876 }
2877 }
2878stop_cqe:
2879 cq->getp = cur_getp;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302880 if (cq->deferred_arm) {
2881 ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
Parav Panditfe2caef2012-03-21 04:09:06 +05302882 polled_hw_cqes);
Devesh Sharmaea6176262014-02-04 11:56:54 +05302883 cq->deferred_arm = false;
2884 cq->deferred_sol = false;
2885 } else {
2886 /* We need to pop the CQE. No need to arm */
2887 ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
2888 polled_hw_cqes);
2889 cq->deferred_sol = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302890 }
Devesh Sharmaea6176262014-02-04 11:56:54 +05302891
Parav Panditfe2caef2012-03-21 04:09:06 +05302892 return i;
2893}
2894
2895/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2896static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2897 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2898{
2899 int err_cqes = 0;
2900
2901 while (num_entries) {
2902 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2903 break;
2904 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2905 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2906 ocrdma_hwq_inc_tail(&qp->sq);
2907 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2908 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2909 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302910 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302911 return err_cqes;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302912 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302913 ibwc->byte_len = 0;
2914 ibwc->status = IB_WC_WR_FLUSH_ERR;
2915 ibwc = ibwc + 1;
2916 err_cqes += 1;
2917 num_entries -= 1;
2918 }
2919 return err_cqes;
2920}
2921
2922int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2923{
2924 int cqes_to_poll = num_entries;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302925 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2926 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302927 int num_os_cqe = 0, err_cqes = 0;
2928 struct ocrdma_qp *qp;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302929 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05302930
2931 /* poll cqes from adapter CQ */
2932 spin_lock_irqsave(&cq->cq_lock, flags);
2933 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2934 spin_unlock_irqrestore(&cq->cq_lock, flags);
2935 cqes_to_poll -= num_os_cqe;
2936
2937 if (cqes_to_poll) {
2938 wc = wc + num_os_cqe;
2939		/* the adapter returns a single error cqe when the qp moves to
2940		 * the error state, so insert error cqes with wc_status
2941		 * FLUSHED for the pending WQEs and RQEs of the SQs and RQs
2942		 * that use this CQ.
2943		 */
2944 spin_lock_irqsave(&dev->flush_q_lock, flags);
2945 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2946 if (cqes_to_poll == 0)
2947 break;
2948 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2949 cqes_to_poll -= err_cqes;
2950 num_os_cqe += err_cqes;
2951 wc = wc + err_cqes;
2952 }
2953 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2954 }
2955 return num_os_cqe;
2956}
2957
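/*
 * Arming is deferred: except for the very first arm, the doorbell is not
 * rung here with the arm bit set.  Instead the request is remembered in
 * cq->deferred_arm/cq->deferred_sol and applied by ocrdma_poll_hwcq()
 * when the polled CQEs are popped, so that arm and consume happen in a
 * single doorbell write.
 */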
2958int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2959{
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302960 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2961 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302962 u16 cq_id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302963 unsigned long flags;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302964 bool arm_needed = false, sol_needed = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302965
Parav Panditfe2caef2012-03-21 04:09:06 +05302966 cq_id = cq->id;
Parav Panditfe2caef2012-03-21 04:09:06 +05302967
2968 spin_lock_irqsave(&cq->cq_lock, flags);
2969 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
Devesh Sharmaea6176262014-02-04 11:56:54 +05302970 arm_needed = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302971 if (cq_flags & IB_CQ_SOLICITED)
Devesh Sharmaea6176262014-02-04 11:56:54 +05302972 sol_needed = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302973
Devesh Sharmaea6176262014-02-04 11:56:54 +05302974 if (cq->first_arm) {
2975 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2976 cq->first_arm = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302977 }
Devesh Sharmaea6176262014-02-04 11:56:54 +05302978
Devesh Sharmaf93439e2014-06-09 10:52:38 +05302979 cq->deferred_arm = true;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302980 cq->deferred_sol = sol_needed;
Parav Panditfe2caef2012-03-21 04:09:06 +05302981 spin_unlock_irqrestore(&cq->cq_lock, flags);
Devesh Sharmaea6176262014-02-04 11:56:54 +05302982
Parav Panditfe2caef2012-03-21 04:09:06 +05302983 return 0;
2984}
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302985
struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (max_page_list_len > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(status);
}

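/* Allocate the ib_fast_reg_page_list used with an FRMR. The u64 page
 * array is carved out of the same allocation, immediately after the
 * structure itself.
 */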
struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
							   *ibdev,
							   int page_list_len)
{
	struct ib_fast_reg_page_list *frmr_list;
	int size;

	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
	frmr_list = kzalloc(size, GFP_KERNEL);
	if (!frmr_list)
		return ERR_PTR(-ENOMEM);
	frmr_list->page_list = (u64 *)(frmr_list + 1);
	return frmr_list;
}

void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

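/* Kernel-space physical MR registration packs the caller's buffer list
 * into hardware PBEs. count_kernel_pbes() picks a single power-of-two
 * PBE size no larger than the smallest page-rounded buffer, capped at
 * MAX_KERNEL_PBE_SIZE, and returns how many PBEs of that size cover the
 * whole list; 0 means the list cannot be mapped (a buffer other than
 * the first is not page aligned or not a multiple of the page size).
 */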
#define MAX_KERNEL_PBE_SIZE 65536
static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
				    int buf_cnt, u32 *pbe_size)
{
	u64 total_size = 0;
	u64 buf_size = 0;
	int i;
	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
	*pbe_size = roundup_pow_of_two(*pbe_size);

	/* find the smallest PBE size that we can have */
	for (i = 0; i < buf_cnt; i++) {
		/* first addr may not be page aligned, so ignore checking */
		if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
				 (buf_list[i].size & ~PAGE_MASK))) {
			return 0;
		}

		/* if the configured PBE size is greater than the chosen one,
		 * reduce the PBE size.
		 */
		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8, ... */
		buf_size = roundup_pow_of_two(buf_size);
		if (*pbe_size > buf_size)
			*pbe_size = buf_size;

		total_size += buf_size;
	}
	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
		(MAX_KERNEL_PBE_SIZE) : (*pbe_size);

	/* num_pbes = total_size / (*pbe_size); this is implemented below. */

	return total_size >> ilog2(*pbe_size);
}

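/* Walk the caller's physical buffer list and program one hardware PBE
 * per pbe_size chunk into the PBL table, spilling over to the next PBL
 * when the current one is full. The first buffer is programmed with its
 * page-aligned address; the offset within that page is covered by the
 * FBO set up by the caller.
 */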
static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
			      struct ocrdma_hw_mr *hwmr)
{
	int i;
	int idx;
	int pbes_per_buf = 0;
	u64 buf_addr = 0;
	int num_pbes;
	struct ocrdma_pbe *pbe;
	int total_num_pbes = 0;

	if (!hwmr->num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	num_pbes = 0;

	/* go through the OS phy regions & fill hw pbe entries into pbls. */
	for (i = 0; i < ib_buf_cnt; i++) {
		buf_addr = buf_list[i].addr;
		pbes_per_buf =
		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
		    pbe_size;
		hwmr->len += buf_list[i].size;
		/* one OS buffer may need more than one PBE when the buffers
		 * are of different sizes; split the ib_buf into one or more
		 * PBEs.
		 */
		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we always program page-aligned addresses; the
			 * first unaligned address is taken care of by the
			 * fbo.
			 */
			if (i == 0) {
				/* for a non-zero fbo, program the
				 * start of the page.
				 */
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			} else {
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			}
			buf_addr += pbe_size;
			num_pbes += 1;
			total_num_pbes += 1;
			pbe++;

			if (total_num_pbes == hwmr->num_pbes)
				goto mr_tbl_done;
			/* if the current pbl is full, move to the next pbl. */
			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				num_pbes = 0;
			}
		}
	}
mr_tbl_done:
	return;
}

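/* Register a kernel-space physical MR. The buffer list is converted to
 * hardware PBEs with the helpers above, the FBO is derived from the
 * requested iova and the first buffer's page offset, and the access
 * flags are translated into ocrdma_hw_mr permission bits before the
 * registration mailbox command is issued.
 */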
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
				   struct ib_phys_buf *buf_list,
				   int buf_cnt, int acc, u64 *iova_start)
{
	int status = -ENOMEM;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	u32 num_pbes;
	u32 pbe_size = 0;

	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);

	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
	if (num_pbes == 0) {
		status = -EINVAL;
		goto pbl_err;
	}
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto pbl_err;

	mr->hwmr.pbe_size = pbe_size;
	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
	mr->hwmr.va = *iova_start;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;

	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
			  &mr->hwmr);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(status);
}