/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

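/* Copy a response to user space, truncating the copy to udata->outlen so
 * that a kernel response struct larger than what the (possibly older)
 * user library expects never overflows the buffer user space provided.
 */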
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	/* Valid indices are 0..QEDR_ROCE_PKEY_TABLE_LEN - 1 */
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

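/* Map a qed link speed (in Mbps) to the IB (speed, width) pair reported
 * to the core; e.g. a 100G link is advertised as EDR (25 Gbps) x4 lanes.
 */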
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr is zeroed by the caller; avoid zeroing it here. */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;	/* IB physical state: LinkUp */
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;	/* IB physical state: Disabled */
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->gid_tbl_len = 1;
		attr->pkey_tbl_len = 1;
	} else {
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	/* Log the search key, not *mm: when nothing matched, mm does not
	 * point at a valid entry here.
	 */
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 phy_addr, len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.dpm_enabled = dev->user_dpm_enabled;
	uresp.wids_enabled = 1;
	uresp.wid_count = oparams.wid_count;
	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp = {
			.pd_id = pd_id,
		};

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

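/* Worked example of these limits: a 64 KiB PBL page holds 64K / 8 = 8192
 * 64-bit PBEs, so a two-layer table tops out at 8192 * 8192 = 64M PBEs,
 * i.e. 64M mapped FW pages.
 */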
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
					 &pa, flags);
		if (!va)
			goto err;

		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-layer PBLs: if we have more than one PBL, the first one must be
	 * initialized with physical pointers to all of the rest.
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	/* Free only the pages that were successfully allocated, then the
	 * table itself; calling qedr_free_pbl() here would free them twice.
	 */
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	kfree(pbl_table);

	return ERR_PTR(-ENOMEM);
}

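/* Size the PBL table for num_pbes page entries: a single layer when one
 * FW page of PBEs suffices, otherwise two layers where the page size is
 * doubled until (pbl_size / 8)^2 entries can cover num_pbes.
 */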
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for layer 0 (points to the PBLs) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

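/* Write the DMA addresses of all umem pages into the PBEs. The umem may
 * use a larger page size than the FW page (pg_shift), in which case each
 * umem page expands into fw_pg_per_umem_pg consecutive FW-sized PBEs.
 */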
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two-layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lies in the second pbl of the table.
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

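/* Round the CQE count up to fill whole pages, since the chain allocates
 * full pages anyway. Illustrative math, assuming 32-byte CQEs and 4 KiB
 * pages: entries = 256 gives (256 + 1) * 32 = 8224 bytes, aligned up to
 * 12288, i.e. 384 usable CQEs.
 */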
static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

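/* Pin a user-space queue buffer and build its PBL. The PBL is sized in
 * FW pages: each umem page yields 2^(page_shift - FW_PAGE_SHIFT) FW
 * pages. When alloc_and_init is false (the iWARP flow), only the table
 * skeleton is allocated here; the PBL memory itself is provided by qed
 * later (see qedr_iwarp_populate_user_qp).
 */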
static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_page_count(q->umem) <<
	    (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	return 0;

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

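/* Ring the CQ doorbell: publish the new consumer index together with the
 * arm flags in a single 64-bit write, then flush it so the HW sees the
 * update before we return.
 */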
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE,
					  1, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl, NULL);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

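/* Destroy-CQ polling budget: first spin up to 10 x 10 us, then sleep up
 * to 10 x 10 ms, waiting for the last CNQ interrupts to be accounted.
 */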
#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		goto done;

	iparams.icid = cq->icid;
	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		return rc;

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	if (oparams.num_cq_notif != cq->cnq_notif)
		goto err;

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
done:
	cq->sig = ~cq->sig;

	kfree(cq);

	return 0;

err:
	DP_ERR(dev,
	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);

	return -EINVAL;
}

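/* Resolve the source GID entry for a modify-QP call and derive the RoCE
 * mode (v1, v2/IPv4 or v2/IPv6) plus the sgid/dgid pair the FW expects.
 */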
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device,
			       rdma_ah_get_port_num(&attr->ah_attr),
			       grh->sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

	dev_put(gid_attr.ndev);
	nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
	switch (nw_type) {
	case RDMA_NETWORK_IPV6:
		memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
		       sizeof(qp_params->sgid));
		memcpy(&qp_params->dgid.bytes[0],
		       &grh->dgid,
		       sizeof(qp_params->dgid));
		qp_params->roce_mode = ROCE_V2_IPV6;
		SET_FIELD(qp_params->modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
		break;
	case RDMA_NETWORK_IB:
		memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
		       sizeof(qp_params->sgid));
		memcpy(&qp_params->dgid.bytes[0],
		       &grh->dgid,
		       sizeof(qp_params->dgid));
		qp_params->roce_mode = ROCE_V1;
		break;
	case RDMA_NETWORK_IPV4:
		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
		ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
		qp_params->sgid.ipv4_addr = ipv4_addr;
		ipv4_addr =
		    qedr_get_ipv4_from_gid(grh->dgid.raw);
		qp_params->dgid.ipv4_addr = ipv4_addr;
		SET_FIELD(qp_params->modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
		qp_params->roce_mode = ROCE_V2_IPV4;
		break;
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	/* iWARP requires two doorbells per RQ. */
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	} else {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	}

	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

	/* iWARP uses the same cid for rq and sq */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp->sq_icid = qp->icid;
	else
		uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(dev, &uresp, qp);
	qedr_copy_rq_uresp(dev, &uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	atomic_set(&qp->refcnt, 1);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
{
	int rc;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&dev->idr_lock);

	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);

	spin_unlock_irq(&dev->idr_lock);
	idr_preload_end();

	return rc < 0 ? rc : 0;
}

static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
{
	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return;

	spin_lock_irq(&dev->idr_lock);
	idr_remove(&dev->qpidr, id);
	spin_unlock_irq(&dev->idr_lock);
}

static inline void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_out_params *out_params)
{
	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;

	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
			   &qp->usq.pbl_info, FW_PAGE_SHIFT);

	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;

	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
}

static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
	int rc = -EINVAL;

	ib_ctx = ibpd->uobject->context;

	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		qedr_iwarp_populate_user_qp(dev, qp, &out_params);

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
}

static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	int rc;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, NULL);

	if (rc)
		return rc;

	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, NULL);
	if (rc)
		return rc;

	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_roce_db_info(dev, qp);
	return rc;
}

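/* The iWARP flow is inverted relative to RoCE: qed creates the QP first
 * and returns the PBL memory it allocated, and the SQ/RQ chains are then
 * built on top of that memory via the ext_pbl parameter.
 */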
static int
qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_in_params *in_params,
			    u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_chain_ext_pbl ext_pbl;
	int rc;

	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
						     QEDR_SQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);
	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
						     QEDR_RQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	/* Now we allocate the chain */
	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, &ext_pbl);

	if (rc)
		goto err;

	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, &ext_pbl);

	if (rc)
		goto err;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_iwarp_db_info(dev, qp);
	return rc;

err:
	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);

	return rc;
}

static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static int qedr_create_kernel_qp(struct qedr_dev *dev,
				 struct qedr_qp *qp,
				 struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	int rc = -EINVAL;
	u32 n_rq_elems;
	u32 n_sq_elems;
	u32 n_sq_entries;

	memset(&in_params, 0, sizeof(in_params));

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 * We should add an extra WR since the prod and cons indices of
	 * wqe_wr_id are managed in such a way that the WQ is considered full
	 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far
	 * more WRs than indicated. If we decline its ib_post_send() then we
	 * get error prints in the dmesg we'd like to avoid.
	 */
	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
			      dev->attr.max_sqe);

	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
		return -ENOMEM;
	}

	/* QP handle to be written in CQE */
	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);

	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		DP_ERR(dev,
		       "create qp: failed RQ shadow memory allocation\n");
		kfree(qp->wqe_wr_id);
		return -ENOMEM;
	}

	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);

	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;

	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
						 n_sq_elems, n_rq_elems);
	else
		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
						n_sq_elems, n_rq_elems);
	if (rc)
		qedr_cleanup_kernel(dev, qp);

	return rc;
}

1667struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1668 struct ib_qp_init_attr *attrs,
1669 struct ib_udata *udata)
1670{
1671 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001672 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001673 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001674 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001675 int rc = 0;
1676
1677 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1678 udata ? "user library" : "kernel", pd);
1679
1680 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1681 if (rc)
1682 return ERR_PTR(rc);
1683
Wei Yongjun181d8012016-10-28 16:33:47 +00001684 if (attrs->srq)
1685 return ERR_PTR(-EINVAL);
1686
Ram Amranicecbcdd2016-10-10 13:15:34 +03001687 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001688		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1689 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001690 get_qedr_cq(attrs->send_cq),
1691 get_qedr_cq(attrs->send_cq)->icid,
1692 get_qedr_cq(attrs->recv_cq),
1693 get_qedr_cq(attrs->recv_cq)->icid);
1694
Amrani, Ramdf158562016-12-22 14:52:24 +02001695 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1696 if (!qp) {
1697 DP_ERR(dev, "create qp: failed allocating memory\n");
1698 return ERR_PTR(-ENOMEM);
1699 }
1700
1701 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001702
Ram Amrani04886772016-10-10 13:15:38 +03001703 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001704 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1705 if (IS_ERR(ibqp))
1706 kfree(qp);
1707 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001708 }
1709
Amrani, Ramdf158562016-12-22 14:52:24 +02001710 if (udata)
1711 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1712 else
1713 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001714
Amrani, Ramdf158562016-12-22 14:52:24 +02001715 if (rc)
1716 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001717
Ram Amranicecbcdd2016-10-10 13:15:34 +03001718 qp->ibqp.qp_num = qp->qp_id;
1719
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001720 rc = qedr_idr_add(dev, qp, qp->qp_id);
1721 if (rc)
1722 goto err;
1723
Ram Amranicecbcdd2016-10-10 13:15:34 +03001724 return &qp->ibqp;
1725
Amrani, Ramdf158562016-12-22 14:52:24 +02001726err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001727 kfree(qp);
1728
1729 return ERR_PTR(-EFAULT);
1730}
1731
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001732static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001733{
1734 switch (qp_state) {
1735 case QED_ROCE_QP_STATE_RESET:
1736 return IB_QPS_RESET;
1737 case QED_ROCE_QP_STATE_INIT:
1738 return IB_QPS_INIT;
1739 case QED_ROCE_QP_STATE_RTR:
1740 return IB_QPS_RTR;
1741 case QED_ROCE_QP_STATE_RTS:
1742 return IB_QPS_RTS;
1743 case QED_ROCE_QP_STATE_SQD:
1744 return IB_QPS_SQD;
1745 case QED_ROCE_QP_STATE_ERR:
1746 return IB_QPS_ERR;
1747 case QED_ROCE_QP_STATE_SQE:
1748 return IB_QPS_SQE;
1749 }
1750 return IB_QPS_ERR;
1751}
1752
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001753static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1754 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001755{
1756 switch (qp_state) {
1757 case IB_QPS_RESET:
1758 return QED_ROCE_QP_STATE_RESET;
1759 case IB_QPS_INIT:
1760 return QED_ROCE_QP_STATE_INIT;
1761 case IB_QPS_RTR:
1762 return QED_ROCE_QP_STATE_RTR;
1763 case IB_QPS_RTS:
1764 return QED_ROCE_QP_STATE_RTS;
1765 case IB_QPS_SQD:
1766 return QED_ROCE_QP_STATE_SQD;
1767 case IB_QPS_ERR:
1768 return QED_ROCE_QP_STATE_ERR;
1769 default:
1770 return QED_ROCE_QP_STATE_ERR;
1771 }
1772}
1773
1774static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1775{
1776 qed_chain_reset(&qph->pbl);
1777 qph->prod = 0;
1778 qph->cons = 0;
1779 qph->wqe_cons = 0;
1780 qph->db_data.data.value = cpu_to_le16(0);
1781}
1782
1783static int qedr_update_qp_state(struct qedr_dev *dev,
1784 struct qedr_qp *qp,
1785 enum qed_roce_qp_state new_state)
1786{
1787 int status = 0;
1788
1789 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001790 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001791
1792 switch (qp->state) {
1793 case QED_ROCE_QP_STATE_RESET:
1794 switch (new_state) {
1795 case QED_ROCE_QP_STATE_INIT:
1796 qp->prev_wqe_size = 0;
1797 qedr_reset_qp_hwq_info(&qp->sq);
1798 qedr_reset_qp_hwq_info(&qp->rq);
1799 break;
1800 default:
1801 status = -EINVAL;
1802 break;
1803		}
1804 break;
1805 case QED_ROCE_QP_STATE_INIT:
1806 switch (new_state) {
1807 case QED_ROCE_QP_STATE_RTR:
1808 /* Update doorbell (in case post_recv was
1809 * done before move to RTR)
1810 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001811
1812 if (rdma_protocol_roce(&dev->ibdev, 1)) {
Kalderon, Michal09c4854f2018-04-05 09:59:29 +03001813 writel(qp->rq.db_data.raw, qp->rq.db);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001814 /* Make sure write takes effect */
1815 mmiowb();
1816 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001817 break;
1818 case QED_ROCE_QP_STATE_ERR:
1819 break;
1820 default:
1821 /* Invalid state change. */
1822 status = -EINVAL;
1823 break;
1824		}
1825 break;
1826 case QED_ROCE_QP_STATE_RTR:
1827 /* RTR->XXX */
1828 switch (new_state) {
1829 case QED_ROCE_QP_STATE_RTS:
1830 break;
1831 case QED_ROCE_QP_STATE_ERR:
1832 break;
1833 default:
1834 /* Invalid state change. */
1835 status = -EINVAL;
1836 break;
1837		}
1838 break;
1839 case QED_ROCE_QP_STATE_RTS:
1840 /* RTS->XXX */
1841 switch (new_state) {
1842 case QED_ROCE_QP_STATE_SQD:
1843 break;
1844 case QED_ROCE_QP_STATE_ERR:
1845 break;
1846 default:
1847 /* Invalid state change. */
1848 status = -EINVAL;
1849 break;
1850		}
1851 break;
1852 case QED_ROCE_QP_STATE_SQD:
1853 /* SQD->XXX */
1854 switch (new_state) {
1855 case QED_ROCE_QP_STATE_RTS:
1856 case QED_ROCE_QP_STATE_ERR:
1857 break;
1858 default:
1859 /* Invalid state change. */
1860 status = -EINVAL;
1861 break;
1862		}
1863 break;
1864 case QED_ROCE_QP_STATE_ERR:
1865 /* ERR->XXX */
1866 switch (new_state) {
1867 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001868 if ((qp->rq.prod != qp->rq.cons) ||
1869 (qp->sq.prod != qp->sq.cons)) {
1870 DP_NOTICE(dev,
1871 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1872 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1873 qp->sq.cons);
1874 status = -EINVAL;
1875 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001876 break;
1877 default:
1878 status = -EINVAL;
1879 break;
1880		}
1881 break;
1882 default:
1883 status = -EINVAL;
1884 break;
1885	}
1886
1887 return status;
1888}
1889
1890int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1891 int attr_mask, struct ib_udata *udata)
1892{
1893 struct qedr_qp *qp = get_qedr_qp(ibqp);
1894 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1895 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001896 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001897 enum ib_qp_state old_qp_state, new_qp_state;
1898 int rc = 0;
1899
1900 DP_DEBUG(dev, QEDR_MSG_QP,
1901 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1902 attr->qp_state);
1903
1904 old_qp_state = qedr_get_ibqp_state(qp->state);
1905 if (attr_mask & IB_QP_STATE)
1906 new_qp_state = attr->qp_state;
1907 else
1908 new_qp_state = old_qp_state;
1909
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001910 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1911 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1912 ibqp->qp_type, attr_mask,
1913 IB_LINK_LAYER_ETHERNET)) {
1914 DP_ERR(dev,
1915 "modify qp: invalid attribute mask=0x%x specified for\n"
1916 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1917 attr_mask, qp->qp_id, ibqp->qp_type,
1918 old_qp_state, new_qp_state);
1919 rc = -EINVAL;
1920 goto err;
1921 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001922 }
1923
1924 /* Translate the masks... */
1925 if (attr_mask & IB_QP_STATE) {
1926 SET_FIELD(qp_params.modify_flags,
1927 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1928 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1929 }
1930
1931 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1932 qp_params.sqd_async = true;
1933
1934 if (attr_mask & IB_QP_PKEY_INDEX) {
1935 SET_FIELD(qp_params.modify_flags,
1936 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1937 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1938 rc = -EINVAL;
1939 goto err;
1940 }
1941
1942 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1943 }
1944
1945 if (attr_mask & IB_QP_QKEY)
1946 qp->qkey = attr->qkey;
1947
1948 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1949 SET_FIELD(qp_params.modify_flags,
1950 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1951 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1952 IB_ACCESS_REMOTE_READ;
1953 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1954 IB_ACCESS_REMOTE_WRITE;
1955 qp_params.incoming_atomic_en = attr->qp_access_flags &
1956 IB_ACCESS_REMOTE_ATOMIC;
1957 }
1958
1959 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1960 if (attr_mask & IB_QP_PATH_MTU) {
1961 if (attr->path_mtu < IB_MTU_256 ||
1962 attr->path_mtu > IB_MTU_4096) {
1963 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1964 rc = -EINVAL;
1965 goto err;
1966 }
1967 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1968 ib_mtu_enum_to_int(iboe_get_mtu
1969 (dev->ndev->mtu)));
1970 }
1971
1972 if (!qp->mtu) {
1973 qp->mtu =
1974 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1975 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1976 }
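		/* Rough example (header sizes assumed): a 1500-byte netdev
		 * MTU leaves roughly 1440 bytes of RoCE payload after
		 * GRH/BTH/DETH overhead, so iboe_get_mtu() resolves to
		 * IB_MTU_1024 here.
		 */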
1977
1978 SET_FIELD(qp_params.modify_flags,
1979 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1980
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001981 qp_params.traffic_class_tos = grh->traffic_class;
1982 qp_params.flow_label = grh->flow_label;
1983 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001984
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001985 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001986
1987 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1988 if (rc) {
1989 DP_ERR(dev,
1990 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001991 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001992 return rc;
1993 }
1994
1995 rc = qedr_get_dmac(dev, &attr->ah_attr,
1996 qp_params.remote_mac_addr);
1997 if (rc)
1998 return rc;
1999
2000 qp_params.use_local_mac = true;
2001 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2002
2003 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2004 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2005 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2006 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2007 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2008 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2009 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2010 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002011
2012 qp_params.mtu = qp->mtu;
2013 qp_params.lb_indication = false;
2014 }
2015
2016 if (!qp_params.mtu) {
2017 /* Stay with current MTU */
2018 if (qp->mtu)
2019 qp_params.mtu = qp->mtu;
2020 else
2021 qp_params.mtu =
2022 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2023 }
2024
2025 if (attr_mask & IB_QP_TIMEOUT) {
2026 SET_FIELD(qp_params.modify_flags,
2027 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2028
2029 qp_params.ack_timeout = attr->timeout;
2030 if (attr->timeout) {
2031 u32 temp;
2032
2033 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2034 /* FW requires [msec] */
2035 qp_params.ack_timeout = temp;
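			/* Worked example: attr->timeout = 14 encodes the
			 * IBTA ack timeout 4.096 usec * 2^14, and
			 * 4096 * 16384 / 1000 / 1000 = 67, i.e. ~67 msec
			 * is what the FW receives.
			 */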
2036 } else {
2037 /* Infinite */
2038 qp_params.ack_timeout = 0;
2039 }
2040 }
2041 if (attr_mask & IB_QP_RETRY_CNT) {
2042 SET_FIELD(qp_params.modify_flags,
2043 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2044 qp_params.retry_cnt = attr->retry_cnt;
2045 }
2046
2047 if (attr_mask & IB_QP_RNR_RETRY) {
2048 SET_FIELD(qp_params.modify_flags,
2049 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2050 qp_params.rnr_retry_cnt = attr->rnr_retry;
2051 }
2052
2053 if (attr_mask & IB_QP_RQ_PSN) {
2054 SET_FIELD(qp_params.modify_flags,
2055 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2056 qp_params.rq_psn = attr->rq_psn;
2057 qp->rq_psn = attr->rq_psn;
2058 }
2059
2060 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2061 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2062 rc = -EINVAL;
2063 DP_ERR(dev,
2064 "unsupported max_rd_atomic=%d, supported=%d\n",
2065 attr->max_rd_atomic,
2066 dev->attr.max_qp_req_rd_atomic_resc);
2067 goto err;
2068 }
2069
2070 SET_FIELD(qp_params.modify_flags,
2071 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2072 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2073 }
2074
2075 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2076 SET_FIELD(qp_params.modify_flags,
2077 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2078 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2079 }
2080
2081 if (attr_mask & IB_QP_SQ_PSN) {
2082 SET_FIELD(qp_params.modify_flags,
2083 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2084 qp_params.sq_psn = attr->sq_psn;
2085 qp->sq_psn = attr->sq_psn;
2086 }
2087
2088 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2089 if (attr->max_dest_rd_atomic >
2090 dev->attr.max_qp_resp_rd_atomic_resc) {
2091 DP_ERR(dev,
2092 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2093 attr->max_dest_rd_atomic,
2094 dev->attr.max_qp_resp_rd_atomic_resc);
2095
2096 rc = -EINVAL;
2097 goto err;
2098 }
2099
2100 SET_FIELD(qp_params.modify_flags,
2101 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2102 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2103 }
2104
2105 if (attr_mask & IB_QP_DEST_QPN) {
2106 SET_FIELD(qp_params.modify_flags,
2107 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2108
2109 qp_params.dest_qp = attr->dest_qp_num;
2110 qp->dest_qp_num = attr->dest_qp_num;
2111 }
2112
2113 if (qp->qp_type != IB_QPT_GSI)
2114 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2115 qp->qed_qp, &qp_params);
2116
2117 if (attr_mask & IB_QP_STATE) {
2118 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002119 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002120 qp->state = qp_params.new_state;
2121 }
2122
2123err:
2124 return rc;
2125}
2126
2127static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2128{
2129 int ib_qp_acc_flags = 0;
2130
2131 if (params->incoming_rdma_write_en)
2132 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2133 if (params->incoming_rdma_read_en)
2134 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2135 if (params->incoming_atomic_en)
2136 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2137 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2138 return ib_qp_acc_flags;
2139}
2140
2141int qedr_query_qp(struct ib_qp *ibqp,
2142 struct ib_qp_attr *qp_attr,
2143 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2144{
2145 struct qed_rdma_query_qp_out_params params;
2146 struct qedr_qp *qp = get_qedr_qp(ibqp);
2147 struct qedr_dev *dev = qp->dev;
2148 int rc = 0;
2149
2150 memset(&params, 0, sizeof(params));
2151
2152 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2153 if (rc)
2154 goto err;
2155
2156 memset(qp_attr, 0, sizeof(*qp_attr));
2157 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2158
2159 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2160 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002161 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002162 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2163 qp_attr->rq_psn = params.rq_psn;
2164 qp_attr->sq_psn = params.sq_psn;
2165 qp_attr->dest_qp_num = params.dest_qp;
2166
2167 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2168
2169 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2170 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2171 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2172 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002173 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002174 qp_init_attr->cap = qp_attr->cap;
2175
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002176 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002177 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2178 params.flow_label, qp->sgid_idx,
2179 params.hop_limit_ttl, params.traffic_class_tos);
2180 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2181 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2182 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002183 qp_attr->timeout = params.timeout;
2184 qp_attr->rnr_retry = params.rnr_retry;
2185 qp_attr->retry_cnt = params.retry_cnt;
2186 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2187 qp_attr->pkey_index = params.pkey_index;
2188 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002189 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2190 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002191 qp_attr->alt_pkey_index = 0;
2192 qp_attr->alt_port_num = 0;
2193 qp_attr->alt_timeout = 0;
2194 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2195
2196 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2197 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2198 qp_attr->max_rd_atomic = params.max_rd_atomic;
2199 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2200
2201 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2202 qp_attr->cap.max_inline_data);
2203
2204err:
2205 return rc;
2206}
2207
Bart Van Assche00899852017-10-11 10:49:17 -07002208static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
Amrani, Ramdf158562016-12-22 14:52:24 +02002209{
2210 int rc = 0;
2211
2212 if (qp->qp_type != IB_QPT_GSI) {
2213 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2214 if (rc)
2215 return rc;
2216 }
2217
2218 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2219 qedr_cleanup_user(dev, qp);
2220 else
2221 qedr_cleanup_kernel(dev, qp);
2222
2223 return 0;
2224}
2225
Ram Amranicecbcdd2016-10-10 13:15:34 +03002226int qedr_destroy_qp(struct ib_qp *ibqp)
2227{
2228 struct qedr_qp *qp = get_qedr_qp(ibqp);
2229 struct qedr_dev *dev = qp->dev;
2230 struct ib_qp_attr attr;
2231 int attr_mask = 0;
2232 int rc = 0;
2233
2234 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2235 qp, qp->qp_type);
2236
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002237 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2238 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2239 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2240 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002241
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002242 attr.qp_state = IB_QPS_ERR;
2243 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002244
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002245 /* Change the QP state to ERROR */
2246 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2247 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002248 } else {
2249 /* Wait for the connect/accept to complete */
2250 if (qp->ep) {
2251 int wait_count = 1;
2252
2253 while (qp->ep->during_connect) {
2254 DP_DEBUG(dev, QEDR_MSG_QP,
2255 "Still in during connect/accept\n");
2256
2257 msleep(100);
2258 if (wait_count++ > 200) {
2259 DP_NOTICE(dev,
2260 "during connect timeout\n");
2261 break;
2262 }
2263 }
2264 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002265 }
2266
Amrani, Ramdf158562016-12-22 14:52:24 +02002267 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002268 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002269
Amrani, Ramdf158562016-12-22 14:52:24 +02002270 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002271
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002272 if (atomic_dec_and_test(&qp->refcnt)) {
2273 qedr_idr_remove(dev, qp->qp_id);
2274 kfree(qp);
2275 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002276 return rc;
2277}
Ram Amranie0290cc2016-10-10 13:15:35 +03002278
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002279struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002280 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002281{
2282 struct qedr_ah *ah;
2283
2284 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2285 if (!ah)
2286 return ERR_PTR(-ENOMEM);
2287
2288 ah->attr = *attr;
2289
2290 return &ah->ibah;
2291}
2292
2293int qedr_destroy_ah(struct ib_ah *ibah)
2294{
2295 struct qedr_ah *ah = get_qedr_ah(ibah);
2296
2297 kfree(ah);
2298 return 0;
2299}
2300
Ram Amranie0290cc2016-10-10 13:15:35 +03002301static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2302{
2303 struct qedr_pbl *pbl, *tmp;
2304
2305 if (info->pbl_table)
2306 list_add_tail(&info->pbl_table->list_entry,
2307 &info->free_pbl_list);
2308
2309 if (!list_empty(&info->inuse_pbl_list))
2310 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2311
2312 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2313 list_del(&pbl->list_entry);
2314 qedr_free_pbl(dev, &info->pbl_info, pbl);
2315 }
2316}
2317
2318static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2319 size_t page_list_len, bool two_layered)
2320{
2321 struct qedr_pbl *tmp;
2322 int rc;
2323
2324 INIT_LIST_HEAD(&info->free_pbl_list);
2325 INIT_LIST_HEAD(&info->inuse_pbl_list);
2326
2327 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2328 page_list_len, two_layered);
2329 if (rc)
2330 goto done;
2331
2332 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002333 if (IS_ERR(info->pbl_table)) {
2334 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002335 goto done;
2336 }
2337
2338 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2339 &info->pbl_table->pa);
2340
2341	/* In the usual case we use 2 PBLs, so we add one to the free
2342	 * list and allocate another one
2343	 */
2344 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002345 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002346 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2347 goto done;
2348 }
2349
2350 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2351
2352 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2353
2354done:
2355 if (rc)
2356 free_mr_info(dev, info);
2357
2358 return rc;
2359}
2360
2361struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2362 u64 usr_addr, int acc, struct ib_udata *udata)
2363{
2364 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2365 struct qedr_mr *mr;
2366 struct qedr_pd *pd;
2367 int rc = -ENOMEM;
2368
2369 pd = get_qedr_pd(ibpd);
2370 DP_DEBUG(dev, QEDR_MSG_MR,
2371 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2372 pd->pd_id, start, len, usr_addr, acc);
2373
2374 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2375 return ERR_PTR(-EINVAL);
2376
2377 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2378 if (!mr)
2379 return ERR_PTR(rc);
2380
2381 mr->type = QEDR_MR_USER;
2382
2383 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2384 if (IS_ERR(mr->umem)) {
2385 rc = -EFAULT;
2386 goto err0;
2387 }
2388
2389 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2390 if (rc)
2391 goto err1;
2392
2393 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002394 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002395
2396 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2397 if (rc) {
2398 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2399 goto err1;
2400 }
2401
2402 /* Index only, 18 bit long, lkey = itid << 8 | key */
2403 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2404 mr->hw_mr.key = 0;
2405 mr->hw_mr.pd = pd->pd_id;
2406 mr->hw_mr.local_read = 1;
2407 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2408 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2409 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2410 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2411 mr->hw_mr.mw_bind = false;
2412 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2413 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2414 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002415 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002416 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2417 mr->hw_mr.length = len;
2418 mr->hw_mr.vaddr = usr_addr;
2419 mr->hw_mr.zbva = false;
2420 mr->hw_mr.phy_mr = false;
2421 mr->hw_mr.dma_mr = false;
2422
2423 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2424 if (rc) {
2425 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2426 goto err2;
2427 }
2428
2429 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2430 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2431 mr->hw_mr.remote_atomic)
2432 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
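	/* Key layout sketch (values illustrative only): with itid = 0x1b2
	 * and key = 0 the resulting lkey is 0x1b200; when any remote-access
	 * flag was requested, the rkey is the identical value.
	 */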
2433
2434 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2435 mr->ibmr.lkey);
2436 return &mr->ibmr;
2437
2438err2:
2439 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2440err1:
2441 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2442err0:
2443 kfree(mr);
2444 return ERR_PTR(rc);
2445}
2446
2447int qedr_dereg_mr(struct ib_mr *ib_mr)
2448{
2449 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2450 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2451 int rc = 0;
2452
2453 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2454 if (rc)
2455 return rc;
2456
2457 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2458
2459 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2460 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2461
2462 /* it could be user registered memory. */
2463 if (mr->umem)
2464 ib_umem_release(mr->umem);
2465
2466 kfree(mr);
2467
2468 return rc;
2469}
2470
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002471static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2472 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002473{
2474 struct qedr_pd *pd = get_qedr_pd(ibpd);
2475 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2476 struct qedr_mr *mr;
2477 int rc = -ENOMEM;
2478
2479 DP_DEBUG(dev, QEDR_MSG_MR,
2480 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2481 max_page_list_len);
2482
2483 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2484 if (!mr)
2485 return ERR_PTR(rc);
2486
2487 mr->dev = dev;
2488 mr->type = QEDR_MR_FRMR;
2489
2490 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2491 if (rc)
2492 goto err0;
2493
2494 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2495 if (rc) {
2496 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2497 goto err0;
2498 }
2499
2500 /* Index only, 18 bit long, lkey = itid << 8 | key */
2501 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2502 mr->hw_mr.key = 0;
2503 mr->hw_mr.pd = pd->pd_id;
2504 mr->hw_mr.local_read = 1;
2505 mr->hw_mr.local_write = 0;
2506 mr->hw_mr.remote_read = 0;
2507 mr->hw_mr.remote_write = 0;
2508 mr->hw_mr.remote_atomic = 0;
2509 mr->hw_mr.mw_bind = false;
2510 mr->hw_mr.pbl_ptr = 0;
2511 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2512 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2513 mr->hw_mr.fbo = 0;
2514 mr->hw_mr.length = 0;
2515 mr->hw_mr.vaddr = 0;
2516 mr->hw_mr.zbva = false;
2517 mr->hw_mr.phy_mr = true;
2518 mr->hw_mr.dma_mr = false;
2519
2520 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2521 if (rc) {
2522 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2523 goto err1;
2524 }
2525
2526 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2527 mr->ibmr.rkey = mr->ibmr.lkey;
2528
2529 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2530 return mr;
2531
2532err1:
2533 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2534err0:
2535 kfree(mr);
2536 return ERR_PTR(rc);
2537}
2538
2539struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2540 enum ib_mr_type mr_type, u32 max_num_sg)
2541{
Ram Amranie0290cc2016-10-10 13:15:35 +03002542 struct qedr_mr *mr;
2543
2544 if (mr_type != IB_MR_TYPE_MEM_REG)
2545 return ERR_PTR(-EINVAL);
2546
2547 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2548
2549 if (IS_ERR(mr))
2550 return ERR_PTR(-EINVAL);
2551
Ram Amranie0290cc2016-10-10 13:15:35 +03002552 return &mr->ibmr;
2553}
2554
2555static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2556{
2557 struct qedr_mr *mr = get_qedr_mr(ibmr);
2558 struct qedr_pbl *pbl_table;
2559 struct regpair *pbe;
2560 u32 pbes_in_page;
2561
2562 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2563		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2564 return -ENOMEM;
2565 }
2566
2567 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2568 mr->npages, addr);
2569
2570 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2571 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2572 pbe = (struct regpair *)pbl_table->va;
2573 pbe += mr->npages % pbes_in_page;
2574 pbe->lo = cpu_to_le32((u32)addr);
2575 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
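	/* Indexing sketch (a pbl_size of 4096 is assumed for the example):
	 * 4096 / sizeof(u64) = 512 PBEs per table page, so npages = 700
	 * lands in pbl_table[1] at entry 700 % 512 = 188.
	 */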
2576
2577 mr->npages++;
2578
2579 return 0;
2580}
2581
2582static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2583{
2584 int work = info->completed - info->completed_handled - 1;
2585
2586 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2587 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2588 struct qedr_pbl *pbl;
2589
2590		/* Free all the page lists that can be freed (all the ones
2591		 * that were invalidated), under the assumption that if an FMR
2592		 * completed successfully, any invalidate operation posted
2593		 * before it has also completed
2594 */
2595 pbl = list_first_entry(&info->inuse_pbl_list,
2596 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002597 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002598 info->completed_handled++;
2599 }
2600}
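/* Accounting sketch: with info->completed = 5 and completed_handled = 2,
 * work = 2, so two invalidated PBLs move back to the free list while the
 * most recent one is deliberately left in use as a guard (assumed intent
 * of the "- 1" above).
 */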
2601
2602int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2603 int sg_nents, unsigned int *sg_offset)
2604{
2605 struct qedr_mr *mr = get_qedr_mr(ibmr);
2606
2607 mr->npages = 0;
2608
2609 handle_completed_mrs(mr->dev, &mr->info);
2610 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2611}
2612
2613struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2614{
2615 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2616 struct qedr_pd *pd = get_qedr_pd(ibpd);
2617 struct qedr_mr *mr;
2618 int rc;
2619
2620 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2621 if (!mr)
2622 return ERR_PTR(-ENOMEM);
2623
2624 mr->type = QEDR_MR_DMA;
2625
2626 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2627 if (rc) {
2628 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2629 goto err1;
2630 }
2631
2632 /* index only, 18 bit long, lkey = itid << 8 | key */
2633 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2634 mr->hw_mr.pd = pd->pd_id;
2635 mr->hw_mr.local_read = 1;
2636 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2637 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2638 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2639 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2640 mr->hw_mr.dma_mr = true;
2641
2642 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2643 if (rc) {
2644 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2645 goto err2;
2646 }
2647
2648 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2649 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2650 mr->hw_mr.remote_atomic)
2651 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2652
2653 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2654 return &mr->ibmr;
2655
2656err2:
2657 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2658err1:
2659 kfree(mr);
2660 return ERR_PTR(rc);
2661}
Ram Amraniafa0e132016-10-10 13:15:36 +03002662
2663static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2664{
2665 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2666}
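/* Example: with max_wr = 4, prod = 3 and cons = 0, (3 + 1) % 4 == 0 and the
 * queue reports full; one slot is sacrificed so that the full and empty
 * states remain distinguishable.
 */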
2667
2668static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2669{
2670 int i, len = 0;
2671
2672 for (i = 0; i < num_sge; i++)
2673 len += sg_list[i].length;
2674
2675 return len;
2676}
2677
2678static void swap_wqe_data64(u64 *p)
2679{
2680 int i;
2681
2682 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2683 *p = cpu_to_be64(cpu_to_le64(*p));
2684}
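/* Note: cpu_to_be64(cpu_to_le64(x)) degenerates to an unconditional 64-bit
 * byte swap on either host endianness (one of the two conversions is always
 * the identity), presumably matching the byte order the FW expects for
 * inline payload.
 */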
2685
2686static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2687 struct qedr_qp *qp, u8 *wqe_size,
2688 struct ib_send_wr *wr,
2689 struct ib_send_wr **bad_wr, u8 *bits,
2690 u8 bit)
2691{
2692 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2693 char *seg_prt, *wqe;
2694 int i, seg_siz;
2695
2696 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2697 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2698 *bad_wr = wr;
2699 return 0;
2700 }
2701
2702 if (!data_size)
2703 return data_size;
2704
2705 *bits |= bit;
2706
2707 seg_prt = NULL;
2708 wqe = NULL;
2709 seg_siz = 0;
2710
2711 /* Copy data inline */
2712 for (i = 0; i < wr->num_sge; i++) {
2713 u32 len = wr->sg_list[i].length;
2714 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2715
2716 while (len > 0) {
2717 u32 cur;
2718
2719 /* New segment required */
2720 if (!seg_siz) {
2721 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2722 seg_prt = wqe;
2723 seg_siz = sizeof(struct rdma_sq_common_wqe);
2724 (*wqe_size)++;
2725 }
2726
2727 /* Calculate currently allowed length */
2728 cur = min_t(u32, len, seg_siz);
2729 memcpy(seg_prt, src, cur);
2730
2731 /* Update segment variables */
2732 seg_prt += cur;
2733 seg_siz -= cur;
2734
2735 /* Update sge variables */
2736 src += cur;
2737 len -= cur;
2738
2739 /* Swap fully-completed segments */
2740 if (!seg_siz)
2741 swap_wqe_data64((u64 *)wqe);
2742 }
2743 }
2744
2745 /* swap last not completed segment */
2746 if (seg_siz)
2747 swap_wqe_data64((u64 *)wqe);
2748
2749 return data_size;
2750}
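/* Copy-loop sketch: each qed_chain_produce() grants one WQE element with
 * sizeof(struct rdma_sq_common_wqe) bytes of inline room; e.g. with 16-byte
 * segments (size assumed for illustration) a 40-byte payload fills two
 * elements and spills into a third, each fully filled element being
 * byte-swapped in the loop and the final partial one after it.
 */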
2751
2752#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2753 do { \
2754 DMA_REGPAIR_LE(sge->addr, vaddr); \
2755 (sge)->length = cpu_to_le32(vlength); \
2756 (sge)->flags = cpu_to_le32(vflags); \
2757 } while (0)
2758
2759#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2760 do { \
2761 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2762 (hdr)->num_sges = num_sge; \
2763 } while (0)
2764
2765#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2766 do { \
2767 DMA_REGPAIR_LE(sge->addr, vaddr); \
2768 (sge)->length = cpu_to_le32(vlength); \
2769 (sge)->l_key = cpu_to_le32(vlkey); \
2770 } while (0)
2771
2772static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2773 struct ib_send_wr *wr)
2774{
2775 u32 data_size = 0;
2776 int i;
2777
2778 for (i = 0; i < wr->num_sge; i++) {
2779 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2780
2781 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2782 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2783 sge->length = cpu_to_le32(wr->sg_list[i].length);
2784 data_size += wr->sg_list[i].length;
2785 }
2786
2787 if (wqe_size)
2788 *wqe_size += wr->num_sge;
2789
2790 return data_size;
2791}
2792
2793static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2794 struct qedr_qp *qp,
2795 struct rdma_sq_rdma_wqe_1st *rwqe,
2796 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2797 struct ib_send_wr *wr,
2798 struct ib_send_wr **bad_wr)
2799{
2800 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2801 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2802
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002803 if (wr->send_flags & IB_SEND_INLINE &&
2804 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2805 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002806 u8 flags = 0;
2807
2808 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2809 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2810 bad_wr, &rwqe->flags, flags);
2811 }
2812
2813 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2814}
2815
2816static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2817 struct qedr_qp *qp,
2818 struct rdma_sq_send_wqe_1st *swqe,
2819 struct rdma_sq_send_wqe_2st *swqe2,
2820 struct ib_send_wr *wr,
2821 struct ib_send_wr **bad_wr)
2822{
2823 memset(swqe2, 0, sizeof(*swqe2));
2824 if (wr->send_flags & IB_SEND_INLINE) {
2825 u8 flags = 0;
2826
2827 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2828 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2829 bad_wr, &swqe->flags, flags);
2830 }
2831
2832 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2833}
2834
2835static int qedr_prepare_reg(struct qedr_qp *qp,
2836 struct rdma_sq_fmr_wqe_1st *fwqe1,
2837 struct ib_reg_wr *wr)
2838{
2839 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2840 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2841
2842 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2843 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2844 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2845 fwqe1->l_key = wr->key;
2846
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002847 fwqe2->access_ctrl = 0;
2848
Ram Amraniafa0e132016-10-10 13:15:36 +03002849 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2850 !!(wr->access & IB_ACCESS_REMOTE_READ));
2851 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2852 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2853 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2854 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2855 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2856 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2857 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2858 fwqe2->fmr_ctrl = 0;
2859
2860 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2861 ilog2(mr->ibmr.page_size) - 12);
2862
2863 fwqe2->length_hi = 0;
2864 fwqe2->length_lo = mr->ibmr.length;
2865 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2866 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2867
2868 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2869
2870 return 0;
2871}
2872
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002873static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002874{
2875 switch (opcode) {
2876 case IB_WR_RDMA_WRITE:
2877 case IB_WR_RDMA_WRITE_WITH_IMM:
2878 return IB_WC_RDMA_WRITE;
2879 case IB_WR_SEND_WITH_IMM:
2880 case IB_WR_SEND:
2881 case IB_WR_SEND_WITH_INV:
2882 return IB_WC_SEND;
2883 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002884 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002885 return IB_WC_RDMA_READ;
2886 case IB_WR_ATOMIC_CMP_AND_SWP:
2887 return IB_WC_COMP_SWAP;
2888 case IB_WR_ATOMIC_FETCH_AND_ADD:
2889 return IB_WC_FETCH_ADD;
2890 case IB_WR_REG_MR:
2891 return IB_WC_REG_MR;
2892 case IB_WR_LOCAL_INV:
2893 return IB_WC_LOCAL_INV;
2894 default:
2895 return IB_WC_SEND;
2896 }
2897}
2898
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002899static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002900{
2901 int wq_is_full, err_wr, pbl_is_full;
2902 struct qedr_dev *dev = qp->dev;
2903
2904 /* prevent SQ overflow and/or processing of a bad WR */
2905 err_wr = wr->num_sge > qp->sq.max_sges;
2906 wq_is_full = qedr_wq_is_full(&qp->sq);
2907 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2908 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2909 if (wq_is_full || err_wr || pbl_is_full) {
2910 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2911 DP_ERR(dev,
2912 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2913 qp);
2914 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2915 }
2916
2917 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2918 DP_ERR(dev,
2919 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2920 qp);
2921 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2922 }
2923
2924 if (pbl_is_full &&
2925 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2926 DP_ERR(dev,
2927 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2928 qp);
2929 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2930 }
2931 return false;
2932 }
2933 return true;
2934}
2935
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002936static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002937 struct ib_send_wr **bad_wr)
2938{
2939 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2940 struct qedr_qp *qp = get_qedr_qp(ibqp);
2941 struct rdma_sq_atomic_wqe_1st *awqe1;
2942 struct rdma_sq_atomic_wqe_2nd *awqe2;
2943 struct rdma_sq_atomic_wqe_3rd *awqe3;
2944 struct rdma_sq_send_wqe_2st *swqe2;
2945 struct rdma_sq_local_inv_wqe *iwqe;
2946 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2947 struct rdma_sq_send_wqe_1st *swqe;
2948 struct rdma_sq_rdma_wqe_1st *rwqe;
2949 struct rdma_sq_fmr_wqe_1st *fwqe1;
2950 struct rdma_sq_common_wqe *wqe;
2951 u32 length;
2952 int rc = 0;
2953 bool comp;
2954
2955 if (!qedr_can_post_send(qp, wr)) {
2956 *bad_wr = wr;
2957 return -ENOMEM;
2958 }
2959
2960 wqe = qed_chain_produce(&qp->sq.pbl);
2961 qp->wqe_wr_id[qp->sq.prod].signaled =
2962 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2963
2964 wqe->flags = 0;
2965 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2966 !!(wr->send_flags & IB_SEND_SOLICITED));
2967 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2968 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2969 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2970 !!(wr->send_flags & IB_SEND_FENCE));
2971 wqe->prev_wqe_size = qp->prev_wqe_size;
2972
2973 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2974
2975 switch (wr->opcode) {
2976 case IB_WR_SEND_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02002977 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2978 rc = -EINVAL;
2979 *bad_wr = wr;
2980 break;
2981 }
Ram Amraniafa0e132016-10-10 13:15:36 +03002982 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2983 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2984 swqe->wqe_size = 2;
2985 swqe2 = qed_chain_produce(&qp->sq.pbl);
2986
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07002987 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
Ram Amraniafa0e132016-10-10 13:15:36 +03002988 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2989 wr, bad_wr);
2990 swqe->length = cpu_to_le32(length);
2991 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2992 qp->prev_wqe_size = swqe->wqe_size;
2993 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2994 break;
2995 case IB_WR_SEND:
2996 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2997 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2998
2999 swqe->wqe_size = 2;
3000 swqe2 = qed_chain_produce(&qp->sq.pbl);
3001 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3002 wr, bad_wr);
3003 swqe->length = cpu_to_le32(length);
3004 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3005 qp->prev_wqe_size = swqe->wqe_size;
3006 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3007 break;
3008 case IB_WR_SEND_WITH_INV:
3009 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3010 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3011 swqe2 = qed_chain_produce(&qp->sq.pbl);
3012 swqe->wqe_size = 2;
3013 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3014 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3015 wr, bad_wr);
3016 swqe->length = cpu_to_le32(length);
3017 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3018 qp->prev_wqe_size = swqe->wqe_size;
3019 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3020 break;
3021
3022 case IB_WR_RDMA_WRITE_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02003023 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3024 rc = -EINVAL;
3025 *bad_wr = wr;
3026 break;
3027 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003028 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3029 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3030
3031 rwqe->wqe_size = 2;
3032		rwqe->imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3033 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3034 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3035 wr, bad_wr);
3036 rwqe->length = cpu_to_le32(length);
3037 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3038 qp->prev_wqe_size = rwqe->wqe_size;
3039 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3040 break;
3041 case IB_WR_RDMA_WRITE:
3042 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3043 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3044
3045 rwqe->wqe_size = 2;
3046 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3047 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3048 wr, bad_wr);
3049 rwqe->length = cpu_to_le32(length);
3050 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3051 qp->prev_wqe_size = rwqe->wqe_size;
3052 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3053 break;
3054 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003055 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
Bart Van Assche1b8a708b2017-10-11 10:49:19 -07003056		/* fallthrough; the rest is identical to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003057
3058 case IB_WR_RDMA_READ:
3059 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3060 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3061
3062 rwqe->wqe_size = 2;
3063 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3064 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3065 wr, bad_wr);
3066 rwqe->length = cpu_to_le32(length);
3067 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3068 qp->prev_wqe_size = rwqe->wqe_size;
3069 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3070 break;
3071
3072 case IB_WR_ATOMIC_CMP_AND_SWP:
3073 case IB_WR_ATOMIC_FETCH_AND_ADD:
3074 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3075 awqe1->wqe_size = 4;
3076
3077 awqe2 = qed_chain_produce(&qp->sq.pbl);
3078 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3079 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3080
3081 awqe3 = qed_chain_produce(&qp->sq.pbl);
3082
3083 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3084 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3085 DMA_REGPAIR_LE(awqe3->swap_data,
3086 atomic_wr(wr)->compare_add);
3087 } else {
3088 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3089 DMA_REGPAIR_LE(awqe3->swap_data,
3090 atomic_wr(wr)->swap);
3091 DMA_REGPAIR_LE(awqe3->cmp_data,
3092 atomic_wr(wr)->compare_add);
3093 }
3094
3095 qedr_prepare_sq_sges(qp, NULL, wr);
3096
3097 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3098 qp->prev_wqe_size = awqe1->wqe_size;
3099 break;
3100
3101 case IB_WR_LOCAL_INV:
3102 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3103 iwqe->wqe_size = 1;
3104
3105 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3106 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3107 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3108 qp->prev_wqe_size = iwqe->wqe_size;
3109 break;
3110 case IB_WR_REG_MR:
3111 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3112 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3113 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3114 fwqe1->wqe_size = 2;
3115
3116 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3117 if (rc) {
3118 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3119 *bad_wr = wr;
3120 break;
3121 }
3122
3123 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3124 qp->prev_wqe_size = fwqe1->wqe_size;
3125 break;
3126 default:
3127 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3128 rc = -EINVAL;
3129 *bad_wr = wr;
3130 break;
3131 }
3132
3133 if (*bad_wr) {
3134 u16 value;
3135
3136 /* Restore prod to its position before
3137 * this WR was processed
3138 */
3139 value = le16_to_cpu(qp->sq.db_data.data.value);
3140 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3141
3142 /* Restore prev_wqe_size */
3143 qp->prev_wqe_size = wqe->prev_wqe_size;
3144 rc = -EINVAL;
3145 DP_ERR(dev, "POST SEND FAILED\n");
3146 }
3147
3148 return rc;
3149}
3150
3151int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3152 struct ib_send_wr **bad_wr)
3153{
3154 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3155 struct qedr_qp *qp = get_qedr_qp(ibqp);
3156 unsigned long flags;
3157 int rc = 0;
3158
3159 *bad_wr = NULL;
3160
Ram Amrani04886772016-10-10 13:15:38 +03003161 if (qp->qp_type == IB_QPT_GSI)
3162 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3163
Ram Amraniafa0e132016-10-10 13:15:36 +03003164 spin_lock_irqsave(&qp->q_lock, flags);
3165
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003166 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3167 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3168 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3169 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3170 spin_unlock_irqrestore(&qp->q_lock, flags);
3171 *bad_wr = wr;
3172 DP_DEBUG(dev, QEDR_MSG_CQ,
3173 "QP in wrong state! QP icid=0x%x state %d\n",
3174 qp->icid, qp->state);
3175 return -EINVAL;
3176 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003177 }
3178
Ram Amraniafa0e132016-10-10 13:15:36 +03003179 while (wr) {
3180 rc = __qedr_post_send(ibqp, wr, bad_wr);
3181 if (rc)
3182 break;
3183
3184 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3185
3186 qedr_inc_sw_prod(&qp->sq);
3187
3188 qp->sq.db_data.data.value++;
3189
3190 wr = wr->next;
3191 }
3192
3193 /* Trigger doorbell
3194 * If there was a failure in the first WR then it will be triggered in
3195	 * vain. However this is not harmful (as long as the producer value is
3196 * unchanged). For performance reasons we avoid checking for this
3197 * redundant doorbell.
Kalderon, Michal09c4854f2018-04-05 09:59:29 +03003198 *
3199 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3200 * soon as we give the doorbell, we could get a completion
3201 * for this wr, therefore we need to make sure that the
3202 * memory is updated before giving the doorbell.
3203 * During qedr_poll_cq, rmb is called before accessing the
3204 * cqe. This covers for the smp_rmb as well.
Ram Amraniafa0e132016-10-10 13:15:36 +03003205 */
Kalderon, Michal09c4854f2018-04-05 09:59:29 +03003206 smp_wmb();
3207 writel(qp->sq.db_data.raw, qp->sq.db);
Ram Amraniafa0e132016-10-10 13:15:36 +03003208
3209 /* Make sure write sticks */
3210 mmiowb();
3211
3212 spin_unlock_irqrestore(&qp->q_lock, flags);
3213
3214 return rc;
3215}
3216
3217int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3218 struct ib_recv_wr **bad_wr)
3219{
3220 struct qedr_qp *qp = get_qedr_qp(ibqp);
3221 struct qedr_dev *dev = qp->dev;
3222 unsigned long flags;
3223 int status = 0;
3224
Ram Amrani04886772016-10-10 13:15:38 +03003225 if (qp->qp_type == IB_QPT_GSI)
3226 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3227
Ram Amraniafa0e132016-10-10 13:15:36 +03003228 spin_lock_irqsave(&qp->q_lock, flags);
3229
Amrani, Ram922d9a42016-12-22 14:40:38 +02003230 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003231 spin_unlock_irqrestore(&qp->q_lock, flags);
3232 *bad_wr = wr;
3233 return -EINVAL;
3234 }
3235
3236 while (wr) {
3237 int i;
3238
3239 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3240 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3241 wr->num_sge > qp->rq.max_sges) {
3242 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3243 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3244 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3245 qp->rq.max_sges);
3246 status = -ENOMEM;
3247 *bad_wr = wr;
3248 break;
3249 }
3250 for (i = 0; i < wr->num_sge; i++) {
3251 u32 flags = 0;
3252 struct rdma_rq_sge *rqe =
3253 qed_chain_produce(&qp->rq.pbl);
3254
3255 /* First one must include the number
3256 * of SGE in the list
3257 */
3258 if (!i)
3259 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3260 wr->num_sge);
3261
3262 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3263 wr->sg_list[i].lkey);
3264
3265 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3266 wr->sg_list[i].length, flags);
3267 }
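		/* Layout sketch: for wr->num_sge = 2 the first RQE SGE
		 * carries NUM_SGES = 2 alongside its own lkey, while the
		 * second carries only its lkey (per the FW convention
		 * noted above).
		 */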
3268
3269 /* Special case of no sges. FW requires between 1-4 sges...
3270		 * in this case we need to post 1 sge with length zero. This is
3271		 * because an RDMA write with immediate consumes an RQE.
3272 */
3273 if (!wr->num_sge) {
3274 u32 flags = 0;
3275 struct rdma_rq_sge *rqe =
3276 qed_chain_produce(&qp->rq.pbl);
3277
3278 /* First one must include the number
3279 * of SGE in the list
3280 */
3281 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3282 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3283
3284 RQ_SGE_SET(rqe, 0, 0, flags);
3285 i = 1;
3286 }
3287
3288 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3289 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3290
3291 qedr_inc_sw_prod(&qp->rq);
3292
Kalderon, Michal09c4854f2018-04-05 09:59:29 +03003293 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3294 * soon as we give the doorbell, we could get a completion
3295 * for this wr, therefore we need to make sure that the
3296		 * memory is updated before giving the doorbell.
3297 * During qedr_poll_cq, rmb is called before accessing the
3298 * cqe. This covers for the smp_rmb as well.
3299 */
3300 smp_wmb();
Ram Amraniafa0e132016-10-10 13:15:36 +03003301
3302 qp->rq.db_data.data.value++;
3303
3304 writel(qp->rq.db_data.raw, qp->rq.db);
3305
3306 /* Make sure write sticks */
3307 mmiowb();
3308
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003309 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3310 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3311 mmiowb(); /* for second doorbell */
3312 }
3313
Ram Amraniafa0e132016-10-10 13:15:36 +03003314 wr = wr->next;
3315 }
3316
3317 spin_unlock_irqrestore(&qp->q_lock, flags);
3318
3319 return status;
3320}
3321
3322static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3323{
3324 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3325
3326 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3327 cq->pbl_toggle;
3328}
3329
3330static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3331{
3332 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3333 struct qedr_qp *qp;
3334
3335 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3336 resp_cqe->qp_handle.lo,
3337 u64);
3338 return qp;
3339}
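/* Handle sketch: the 64-bit qp pointer split into qp_handle_lo/hi at create
 * time is reassembled here, e.g. qp = 0xffff888012345678 gives
 * hi = 0xffff8880 and lo = 0x12345678 (example address only).
 */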
3340
3341static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3342{
3343 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3344
3345 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3346}
3347
3348/* Return latest CQE (needs processing) */
3349static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3350{
3351 return cq->latest_cqe;
3352}
3353
3354/* For FMR we need to increase the fmr-completed counter, which the FMR
3355 * algorithm uses to determine whether a pbl can be freed.
3356 * We need to do this whether or not the work request was signaled; for
3357 * this purpose we call this function from the condition that checks if a wr
3358 * should be skipped, to make sure we don't miss it (possibly this FMR
3359 * operation was not signaled)
3360 */
3361static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3362{
3363 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3364 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3365}
3366
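/* Advance the SQ software consumer up to the hardware consumer 'hw_cons',
 * generating at most 'num_entries' work completions with the given
 * status. Unsignaled WRs are skipped unless 'force' is set (flush path),
 * but their chain elements are consumed either way.
 */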
3367static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3368 struct qedr_cq *cq, int num_entries,
3369 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3370 int force)
3371{
3372 u16 cnt = 0;
3373
3374 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3375 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3376 qedr_chk_if_fmr(qp);
3377 /* skip WC */
3378 goto next_cqe;
3379 }
3380
3381 /* fill WC */
3382 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003383 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003384 wc->wc_flags = 0;
3385 wc->src_qp = qp->id;
3386 wc->qp = &qp->ibqp;
3387
3388 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3389 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3390
3391 switch (wc->opcode) {
3392 case IB_WC_RDMA_WRITE:
3393 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3394 break;
3395 case IB_WC_COMP_SWAP:
3396 case IB_WC_FETCH_ADD:
3397 wc->byte_len = 8;
3398 break;
3399 case IB_WC_REG_MR:
3400 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3401 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003402 case IB_WC_RDMA_READ:
3403 case IB_WC_SEND:
3404 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3405 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003406 default:
3407 break;
3408 }
3409
3410 num_entries--;
3411 wc++;
3412 cnt++;
3413next_cqe:
3414 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3415 qed_chain_consume(&qp->sq.pbl);
3416 qedr_inc_sw_cons(&qp->sq);
3417 }
3418
3419 return cnt;
3420}
3421
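/* Requester CQE handling: on success, every WR up to sq_cons completes
 * with IB_WC_SUCCESS; on a flush, the outstanding WRs complete with
 * IB_WC_WR_FLUSH_ERR; on any other error, the WRs preceding the failing
 * one complete successfully and a single extra WC (if there is room)
 * carries the translated error status.
 */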
3422static int qedr_poll_cq_req(struct qedr_dev *dev,
3423 struct qedr_qp *qp, struct qedr_cq *cq,
3424 int num_entries, struct ib_wc *wc,
3425 struct rdma_cqe_requester *req)
3426{
3427 int cnt = 0;
3428
3429 switch (req->status) {
3430 case RDMA_CQE_REQ_STS_OK:
3431 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3432 IB_WC_SUCCESS, 0);
3433 break;
3434 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003435 if (qp->state != QED_ROCE_QP_STATE_ERR)
Kalderon, Michaldc728f72018-01-25 13:23:20 +02003436 DP_DEBUG(dev, QEDR_MSG_CQ,
3437 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3438 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003439 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003440 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003441 break;
3442 default:
3443 		/* process all WQEs before the consumer */
3444 qp->state = QED_ROCE_QP_STATE_ERR;
3445 cnt = process_req(dev, qp, cq, num_entries, wc,
3446 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3447 wc += cnt;
3448 /* if we have extra WC fill it with actual error info */
3449 if (cnt < num_entries) {
3450 enum ib_wc_status wc_status;
3451
3452 switch (req->status) {
3453 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3454 DP_ERR(dev,
3455 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3456 cq->icid, qp->icid);
3457 wc_status = IB_WC_BAD_RESP_ERR;
3458 break;
3459 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3460 DP_ERR(dev,
3461 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3462 cq->icid, qp->icid);
3463 wc_status = IB_WC_LOC_LEN_ERR;
3464 break;
3465 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3466 DP_ERR(dev,
3467 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3468 cq->icid, qp->icid);
3469 wc_status = IB_WC_LOC_QP_OP_ERR;
3470 break;
3471 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3472 DP_ERR(dev,
3473 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3474 cq->icid, qp->icid);
3475 wc_status = IB_WC_LOC_PROT_ERR;
3476 break;
3477 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3478 DP_ERR(dev,
3479 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3480 cq->icid, qp->icid);
3481 wc_status = IB_WC_MW_BIND_ERR;
3482 break;
3483 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3484 DP_ERR(dev,
3485 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3486 cq->icid, qp->icid);
3487 wc_status = IB_WC_REM_INV_REQ_ERR;
3488 break;
3489 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3490 DP_ERR(dev,
3491 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3492 cq->icid, qp->icid);
3493 wc_status = IB_WC_REM_ACCESS_ERR;
3494 break;
3495 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3496 DP_ERR(dev,
3497 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3498 cq->icid, qp->icid);
3499 wc_status = IB_WC_REM_OP_ERR;
3500 break;
3501 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3502 DP_ERR(dev,
3503 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3504 cq->icid, qp->icid);
3505 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3506 break;
3507 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3508 DP_ERR(dev,
3509 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3510 cq->icid, qp->icid);
3511 wc_status = IB_WC_RETRY_EXC_ERR;
3512 break;
3513 default:
3514 DP_ERR(dev,
3515 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3516 cq->icid, qp->icid);
3517 wc_status = IB_WC_GENERAL_ERR;
3518 }
3519 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3520 wc_status, 1);
3521 }
3522 }
3523
3524 return cnt;
3525}
3526
Amrani, Ramb6acd712017-04-27 13:35:35 +03003527static inline int qedr_cqe_resp_status_to_ib(u8 status)
3528{
3529 switch (status) {
3530 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3531 return IB_WC_LOC_ACCESS_ERR;
3532 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3533 return IB_WC_LOC_LEN_ERR;
3534 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3535 return IB_WC_LOC_QP_OP_ERR;
3536 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3537 return IB_WC_LOC_PROT_ERR;
3538 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3539 return IB_WC_MW_BIND_ERR;
3540 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3541 return IB_WC_REM_INV_RD_REQ_ERR;
3542 case RDMA_CQE_RESP_STS_OK:
3543 return IB_WC_SUCCESS;
3544 default:
3545 return IB_WC_GENERAL_ERR;
3546 }
3547}
3548
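/* Flag combinations accepted on a successful responder CQE:
 * no flags (plain receive), QEDR_RESP_IMM (receive with immediate),
 * QEDR_RESP_IMM | QEDR_RESP_RDMA (RDMA write with immediate) and
 * QEDR_RESP_INV (send with invalidate). Any other combination is
 * rejected below with -EINVAL.
 */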
3549static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3550 struct ib_wc *wc)
3551{
3552 wc->status = IB_WC_SUCCESS;
3553 wc->byte_len = le32_to_cpu(resp->length);
3554
3555 if (resp->flags & QEDR_RESP_IMM) {
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07003556 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
Amrani, Ramb6acd712017-04-27 13:35:35 +03003557 wc->wc_flags |= IB_WC_WITH_IMM;
3558
3559 if (resp->flags & QEDR_RESP_RDMA)
3560 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3561
3562 if (resp->flags & QEDR_RESP_INV)
3563 return -EINVAL;
3564
3565 } else if (resp->flags & QEDR_RESP_INV) {
3566 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3567 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3568
3569 if (resp->flags & QEDR_RESP_RDMA)
3570 return -EINVAL;
3571
3572 } else if (resp->flags & QEDR_RESP_RDMA) {
3573 return -EINVAL;
3574 }
3575
3576 return 0;
3577}
3578
Ram Amraniafa0e132016-10-10 13:15:36 +03003579static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3580 struct qedr_cq *cq, struct ib_wc *wc,
3581 struct rdma_cqe_responder *resp, u64 wr_id)
3582{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003583 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003584 wc->opcode = IB_WC_RECV;
3585 wc->wc_flags = 0;
3586
Amrani, Ramb6acd712017-04-27 13:35:35 +03003587 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3588 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3589 DP_ERR(dev,
3590 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3591 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003592
Amrani, Ramb6acd712017-04-27 13:35:35 +03003593 } else {
3594 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3595 if (wc->status == IB_WC_GENERAL_ERR)
3596 DP_ERR(dev,
3597 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3598 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003599 }
3600
Amrani, Ramb6acd712017-04-27 13:35:35 +03003601 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003602 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003603 wc->src_qp = qp->id;
3604 wc->qp = &qp->ibqp;
3605 wc->wr_id = wr_id;
3606}
3607
3608static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3609 struct qedr_cq *cq, struct ib_wc *wc,
3610 struct rdma_cqe_responder *resp)
3611{
3612 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3613
3614 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3615
3616 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3617 qed_chain_consume(&qp->rq.pbl);
3618 qedr_inc_sw_cons(&qp->rq);
3619
3620 return 1;
3621}
3622
3623static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3624 int num_entries, struct ib_wc *wc, u16 hw_cons)
3625{
3626 u16 cnt = 0;
3627
3628 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3629 /* fill WC */
3630 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003631 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003632 wc->wc_flags = 0;
3633 wc->src_qp = qp->id;
3634 wc->byte_len = 0;
3635 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3636 wc->qp = &qp->ibqp;
3637 num_entries--;
3638 wc++;
3639 cnt++;
3640 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3641 qed_chain_consume(&qp->rq.pbl);
3642 qedr_inc_sw_cons(&qp->rq);
3643 }
3644
3645 return cnt;
3646}
3647
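/* A flushed responder CQE can represent several outstanding RQEs, so the
 * CQE itself is consumed only once the software consumer has caught up
 * with the rq_cons value reported by firmware.
 */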
3648static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3649 struct rdma_cqe_responder *resp, int *update)
3650{
3651 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3652 consume_cqe(cq);
3653 *update |= 1;
3654 }
3655}
3656
3657static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3658 struct qedr_cq *cq, int num_entries,
3659 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3660 int *update)
3661{
3662 int cnt;
3663
3664 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3665 cnt = process_resp_flush(qp, cq, num_entries, wc,
3666 resp->rq_cons);
3667 try_consume_resp_cqe(cq, qp, resp, update);
3668 } else {
3669 cnt = process_resp_one(dev, qp, cq, wc, resp);
3670 consume_cqe(cq);
3671 *update |= 1;
3672 }
3673
3674 return cnt;
3675}
3676
3677static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3678 struct rdma_cqe_requester *req, int *update)
3679{
3680 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3681 consume_cqe(cq);
3682 *update |= 1;
3683 }
3684}
3685
3686int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3687{
3688 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3689 struct qedr_cq *cq = get_qedr_cq(ibcq);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003690 union rdma_cqe *cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003691 u32 old_cons, new_cons;
3692 unsigned long flags;
3693 int update = 0;
3694 int done = 0;
3695
Amrani, Ram4dd72632017-04-27 13:35:34 +03003696 if (cq->destroyed) {
3697 DP_ERR(dev,
3698 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3699 cq, cq->icid);
3700 return 0;
3701 }
3702
Ram Amrani04886772016-10-10 13:15:38 +03003703 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3704 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3705
Ram Amraniafa0e132016-10-10 13:15:36 +03003706 spin_lock_irqsave(&cq->cq_lock, flags);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003707 cqe = cq->latest_cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003708 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3709 while (num_entries && is_valid_cqe(cq, cqe)) {
3710 struct qedr_qp *qp;
3711 int cnt = 0;
3712
3713 /* prevent speculative reads of any field of CQE */
3714 rmb();
3715
3716 qp = cqe_get_qp(cqe);
3717 if (!qp) {
3718 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3719 break;
3720 }
3721
3722 wc->qp = &qp->ibqp;
3723
3724 switch (cqe_get_type(cqe)) {
3725 case RDMA_CQE_TYPE_REQUESTER:
3726 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3727 &cqe->req);
3728 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3729 break;
3730 case RDMA_CQE_TYPE_RESPONDER_RQ:
3731 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3732 &cqe->resp, &update);
3733 break;
3734 case RDMA_CQE_TYPE_INVALID:
3735 default:
3736 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3737 cqe_get_type(cqe));
3738 }
3739 num_entries -= cnt;
3740 wc += cnt;
3741 done += cnt;
3742
3743 cqe = get_cqe(cq);
3744 }
3745 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3746
3747 cq->cq_cons += new_cons - old_cons;
3748
3749 if (update)
3750 		/* The doorbell notifies about the latest VALID entry, but
3751 		 * the chain already points to the next INVALID one.
3752 		 */
3753 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3754
3755 spin_unlock_irqrestore(&cq->cq_lock, flags);
3756 return done;
3757}
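
/* Example (illustrative only, not part of the driver): a consumer drains
 * completions through the core verbs API, which lands in qedr_poll_cq().
 * A minimal sketch, assuming 'cq' and a hypothetical handle_wc() helper:
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */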
Ram Amrani993d1b52016-10-10 13:15:39 +03003758
3759int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3760 u8 port_num,
3761 const struct ib_wc *in_wc,
3762 const struct ib_grh *in_grh,
3763 const struct ib_mad_hdr *mad_hdr,
3764 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3765 size_t *out_mad_size, u16 *out_mad_pkey_index)
3766{
3767 struct qedr_dev *dev = get_qedr_dev(ibdev);
3768
3769 DP_DEBUG(dev, QEDR_MSG_GSI,
3770 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3771 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3772 mad_hdr->class_specific, mad_hdr->class_version,
3773 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3774 return IB_MAD_RESULT_SUCCESS;
3775}