Ram Amraniac1b36e2016-10-10 13:15:32 +03001/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020046#include <linux/qed/common_hsi.h>
47#include "qedr_hsi_rdma.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030048#include <linux/qed/qed_if.h>
49#include "qedr.h"
50#include "verbs.h"
51#include <rdma/qedr-abi.h>
Kalderon, Michal99d195c2017-07-26 14:41:51 +030052#include "qedr_roce_cm.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030053
Ram Amrania7efd772016-10-10 13:15:33 +030054#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55
Amrani, Ramc75d3ec2017-06-26 19:05:04 +030056static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
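/* RoCE on this device exposes a single fixed P_Key: any index within the
 * table length simply returns QEDR_ROCE_PKEY_DEFAULT.
 */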
Ram Amrania7efd772016-10-10 13:15:33 +030064int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65{
66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
67 return -EINVAL;
68
69 *pkey = QEDR_ROCE_PKEY_DEFAULT;
70 return 0;
71}
72
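/* For iWARP the GID is not taken from the RoCE GID table; it is built
 * directly from the MAC address of the associated netdev.
 */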
Kalderon, Michale6a38c52017-07-26 14:41:52 +030073int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
74 int index, union ib_gid *sgid)
75{
76 struct qedr_dev *dev = get_qedr_dev(ibdev);
77
78 memset(sgid->raw, 0, sizeof(sgid->raw));
79 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
80
81 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
82 sgid->global.interface_id, sgid->global.subnet_prefix);
83
84 return 0;
85}
86
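/* Report the device capabilities cached in dev->attr to the verbs layer. */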
Ram Amraniac1b36e2016-10-10 13:15:32 +030087int qedr_query_device(struct ib_device *ibdev,
88 struct ib_device_attr *attr, struct ib_udata *udata)
89{
90 struct qedr_dev *dev = get_qedr_dev(ibdev);
91 struct qedr_device_attr *qattr = &dev->attr;
92
93 if (!dev->rdma_ctx) {
94 DP_ERR(dev,
95 "qedr_query_device called with invalid params rdma_ctx=%p\n",
96 dev->rdma_ctx);
97 return -EINVAL;
98 }
99
100 memset(attr, 0, sizeof(*attr));
101
102 attr->fw_ver = qattr->fw_ver;
103 attr->sys_image_guid = qattr->sys_image_guid;
104 attr->max_mr_size = qattr->max_mr_size;
105 attr->page_size_cap = qattr->page_size_caps;
106 attr->vendor_id = qattr->vendor_id;
107 attr->vendor_part_id = qattr->vendor_part_id;
108 attr->hw_ver = qattr->hw_ver;
109 attr->max_qp = qattr->max_qp;
110 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
111 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
112 IB_DEVICE_RC_RNR_NAK_GEN |
113 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
114
115 attr->max_sge = qattr->max_sge;
116 attr->max_sge_rd = qattr->max_sge;
117 attr->max_cq = qattr->max_cq;
118 attr->max_cqe = qattr->max_cqe;
119 attr->max_mr = qattr->max_mr;
120 attr->max_mw = qattr->max_mw;
121 attr->max_pd = qattr->max_pd;
122 attr->atomic_cap = dev->atomic_cap;
123 attr->max_fmr = qattr->max_fmr;
124 attr->max_map_per_fmr = 16;
125 attr->max_qp_init_rd_atom =
126 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
127 attr->max_qp_rd_atom =
128 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
129 attr->max_qp_init_rd_atom);
130
131 attr->max_srq = qattr->max_srq;
132 attr->max_srq_sge = qattr->max_srq_sge;
133 attr->max_srq_wr = qattr->max_srq_wr;
134
135 attr->local_ca_ack_delay = qattr->dev_ack_delay;
136 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
137 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
138 attr->max_ah = qattr->max_ah;
139
140 return 0;
141}
142
143#define QEDR_SPEED_SDR (1)
144#define QEDR_SPEED_DDR (2)
145#define QEDR_SPEED_QDR (4)
146#define QEDR_SPEED_FDR10 (8)
147#define QEDR_SPEED_FDR (16)
148#define QEDR_SPEED_EDR (32)
149
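/* Map an Ethernet link speed (in Mbps) to the closest equivalent
 * IB speed/width pair reported through the port attributes.
 */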
150static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
151 u8 *ib_width)
152{
153 switch (speed) {
154 case 1000:
155 *ib_speed = QEDR_SPEED_SDR;
156 *ib_width = IB_WIDTH_1X;
157 break;
158 case 10000:
159 *ib_speed = QEDR_SPEED_QDR;
160 *ib_width = IB_WIDTH_1X;
161 break;
162
163 case 20000:
164 *ib_speed = QEDR_SPEED_DDR;
165 *ib_width = IB_WIDTH_4X;
166 break;
167
168 case 25000:
169 *ib_speed = QEDR_SPEED_EDR;
170 *ib_width = IB_WIDTH_1X;
171 break;
172
173 case 40000:
174 *ib_speed = QEDR_SPEED_QDR;
175 *ib_width = IB_WIDTH_4X;
176 break;
177
178 case 50000:
179 *ib_speed = QEDR_SPEED_QDR;
180 *ib_width = IB_WIDTH_4X;
181 break;
182
183 case 100000:
184 *ib_speed = QEDR_SPEED_EDR;
185 *ib_width = IB_WIDTH_4X;
186 break;
187
188 default:
189 /* Unsupported */
190 *ib_speed = QEDR_SPEED_SDR;
191 *ib_width = IB_WIDTH_1X;
192 }
193}
194
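/* Port attributes are derived from the qed core's view of the RDMA port
 * (link state, speed, max message size). iWARP exposes a single GID/P_Key
 * entry, while RoCE exposes the full tables.
 */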
195int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
196{
197 struct qedr_dev *dev;
198 struct qed_rdma_port *rdma_port;
199
200 dev = get_qedr_dev(ibdev);
201 if (port > 1) {
202 DP_ERR(dev, "invalid_port=0x%x\n", port);
203 return -EINVAL;
204 }
205
206 if (!dev->rdma_ctx) {
207 DP_ERR(dev, "rdma_ctx is NULL\n");
208 return -EINVAL;
209 }
210
211 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
Ram Amraniac1b36e2016-10-10 13:15:32 +0300212
Or Gerlitzc4550c62017-01-24 13:02:39 +0200213 /* *attr is zeroed by the caller; avoid zeroing it here */
Ram Amraniac1b36e2016-10-10 13:15:32 +0300214 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
215 attr->state = IB_PORT_ACTIVE;
216 attr->phys_state = 5;
217 } else {
218 attr->state = IB_PORT_DOWN;
219 attr->phys_state = 3;
220 }
221 attr->max_mtu = IB_MTU_4096;
222 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
223 attr->lid = 0;
224 attr->lmc = 0;
225 attr->sm_lid = 0;
226 attr->sm_sl = 0;
227 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
Kalderon, Michalf5b1b172017-07-26 14:41:53 +0300228 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
229 attr->gid_tbl_len = 1;
230 attr->pkey_tbl_len = 1;
231 } else {
232 attr->gid_tbl_len = QEDR_MAX_SGID;
233 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
234 }
Ram Amraniac1b36e2016-10-10 13:15:32 +0300235 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
236 attr->qkey_viol_cntr = 0;
237 get_link_speed_and_width(rdma_port->link_speed,
238 &attr->active_speed, &attr->active_width);
239 attr->max_msg_sz = rdma_port->max_msg_size;
240 attr->max_vl_num = 4;
241
242 return 0;
243}
244
245int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
246 struct ib_port_modify *props)
247{
248 struct qedr_dev *dev;
249
250 dev = get_qedr_dev(ibdev);
251 if (port > 1) {
252 DP_ERR(dev, "invalid_port=0x%x\n", port);
253 return -EINVAL;
254 }
255
256 return 0;
257}
258
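/* Remember a physical range (e.g. the context's doorbell pages) that this
 * ucontext is allowed to mmap(); qedr_mmap() validates requests against
 * this list.
 */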
259static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
260 unsigned long len)
261{
262 struct qedr_mm *mm;
263
264 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
265 if (!mm)
266 return -ENOMEM;
267
268 mm->key.phy_addr = phy_addr;
269 /* This function might be called with a length which is not a multiple
270 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
271 * forces this granularity by increasing the requested size if needed.
272 * When qedr_mmap is called, it will search the list with the updated
273 * length as a key. To prevent search failures, the length is rounded up
274 * in advance to PAGE_SIZE.
275 */
276 mm->key.len = roundup(len, PAGE_SIZE);
277 INIT_LIST_HEAD(&mm->entry);
278
279 mutex_lock(&uctx->mm_list_lock);
280 list_add(&mm->entry, &uctx->mm_head);
281 mutex_unlock(&uctx->mm_list_lock);
282
283 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
284 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
285 (unsigned long long)mm->key.phy_addr,
286 (unsigned long)mm->key.len, uctx);
287
288 return 0;
289}
290
291static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
292 unsigned long len)
293{
294 bool found = false;
295 struct qedr_mm *mm;
296
297 mutex_lock(&uctx->mm_list_lock);
298 list_for_each_entry(mm, &uctx->mm_head, entry) {
299 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
300 continue;
301
302 found = true;
303 break;
304 }
305 mutex_unlock(&uctx->mm_list_lock);
306 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
307 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
 308 (unsigned long long)phy_addr, len, uctx, found);
309
310 return found;
311}
312
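/* Allocate a user context: reserve a DPI (doorbell page) from the qed core
 * and return its location, size and the queue limits to userspace through
 * the response structure.
 */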
313struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
314 struct ib_udata *udata)
315{
316 int rc;
317 struct qedr_ucontext *ctx;
318 struct qedr_alloc_ucontext_resp uresp;
319 struct qedr_dev *dev = get_qedr_dev(ibdev);
320 struct qed_rdma_add_user_out_params oparams;
321
322 if (!udata)
323 return ERR_PTR(-EFAULT);
324
325 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
326 if (!ctx)
327 return ERR_PTR(-ENOMEM);
328
329 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
330 if (rc) {
331 DP_ERR(dev,
332 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
333 rc);
334 goto err;
335 }
336
337 ctx->dpi = oparams.dpi;
338 ctx->dpi_addr = oparams.dpi_addr;
339 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
340 ctx->dpi_size = oparams.dpi_size;
341 INIT_LIST_HEAD(&ctx->mm_head);
342 mutex_init(&ctx->mm_list_lock);
343
344 memset(&uresp, 0, sizeof(uresp));
345
Amrani, Ramad84dad2017-06-26 19:05:05 +0300346 uresp.dpm_enabled = dev->user_dpm_enabled;
Amrani, Ram67cbe352017-06-26 19:05:06 +0300347 uresp.wids_enabled = 1;
348 uresp.wid_count = oparams.wid_count;
Ram Amraniac1b36e2016-10-10 13:15:32 +0300349 uresp.db_pa = ctx->dpi_phys_addr;
350 uresp.db_size = ctx->dpi_size;
351 uresp.max_send_wr = dev->attr.max_sqe;
352 uresp.max_recv_wr = dev->attr.max_rqe;
353 uresp.max_srq_wr = dev->attr.max_srq_wr;
354 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
355 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
356 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
357 uresp.max_cqes = QEDR_MAX_CQES;
358
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300359 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amraniac1b36e2016-10-10 13:15:32 +0300360 if (rc)
361 goto err;
362
363 ctx->dev = dev;
364
365 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
366 if (rc)
367 goto err;
368
369 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
370 &ctx->ibucontext);
371 return &ctx->ibucontext;
372
373err:
374 kfree(ctx);
375 return ERR_PTR(rc);
376}
377
378int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
379{
380 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
381 struct qedr_mm *mm, *tmp;
382 int status = 0;
383
384 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
385 uctx);
386 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
387
388 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
389 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
390 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
391 mm->key.phy_addr, mm->key.len, uctx);
392 list_del(&mm->entry);
393 kfree(mm);
394 }
395
396 kfree(uctx);
397 return status;
398}
399
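/* mmap() handler for userspace: the doorbell BAR range is mapped
 * write-combined and must not be mapped for read; any other validated
 * range (queue chains) is mapped as ordinary memory.
 */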
400int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
401{
402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
403 struct qedr_dev *dev = get_qedr_dev(context->device);
404 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
405 u64 unmapped_db = dev->db_phys_addr;
406 unsigned long len = (vma->vm_end - vma->vm_start);
407 int rc = 0;
408 bool found;
409
410 DP_DEBUG(dev, QEDR_MSG_INIT,
411 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
412 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
413 if (vma->vm_start & (PAGE_SIZE - 1)) {
414 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
415 vma->vm_start);
416 return -EINVAL;
417 }
418
419 found = qedr_search_mmap(ucontext, vm_page, len);
420 if (!found) {
421 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
422 vma->vm_pgoff);
423 return -EINVAL;
424 }
425
426 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
427
428 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
429 dev->db_size))) {
430 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
431 if (vma->vm_flags & VM_READ) {
432 DP_ERR(dev, "Trying to map doorbell bar for read\n");
433 return -EPERM;
434 }
435
436 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
437
438 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
439 PAGE_SIZE, vma->vm_page_prot);
440 } else {
441 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
442 rc = remap_pfn_range(vma, vma->vm_start,
443 vma->vm_pgoff, len, vma->vm_page_prot);
444 }
445 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
446 return rc;
447}
Ram Amrania7efd772016-10-10 13:15:33 +0300448
449struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
450 struct ib_ucontext *context, struct ib_udata *udata)
451{
452 struct qedr_dev *dev = get_qedr_dev(ibdev);
Ram Amrania7efd772016-10-10 13:15:33 +0300453 struct qedr_pd *pd;
454 u16 pd_id;
455 int rc;
456
457 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
458 (udata && context) ? "User Lib" : "Kernel");
459
460 if (!dev->rdma_ctx) {
Colin Ian King847cb1a2017-08-24 09:25:53 +0100461 DP_ERR(dev, "invalid RDMA context\n");
Ram Amrania7efd772016-10-10 13:15:33 +0300462 return ERR_PTR(-EINVAL);
463 }
464
465 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
466 if (!pd)
467 return ERR_PTR(-ENOMEM);
468
Ram Amrani9c1e0222017-01-24 13:51:42 +0200469 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
470 if (rc)
471 goto err;
Ram Amrania7efd772016-10-10 13:15:33 +0300472
Ram Amrania7efd772016-10-10 13:15:33 +0300473 pd->pd_id = pd_id;
474
475 if (udata && context) {
Ram Amrani9c1e0222017-01-24 13:51:42 +0200476 struct qedr_alloc_pd_uresp uresp;
477
478 uresp.pd_id = pd_id;
479
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300480 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrani9c1e0222017-01-24 13:51:42 +0200481 if (rc) {
Ram Amrania7efd772016-10-10 13:15:33 +0300482 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
Ram Amrani9c1e0222017-01-24 13:51:42 +0200483 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
484 goto err;
485 }
486
487 pd->uctx = get_qedr_ucontext(context);
488 pd->uctx->pd = pd;
Ram Amrania7efd772016-10-10 13:15:33 +0300489 }
490
491 return &pd->ibpd;
Ram Amrani9c1e0222017-01-24 13:51:42 +0200492
493err:
494 kfree(pd);
495 return ERR_PTR(rc);
Ram Amrania7efd772016-10-10 13:15:33 +0300496}
497
498int qedr_dealloc_pd(struct ib_pd *ibpd)
499{
500 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
501 struct qedr_pd *pd = get_qedr_pd(ibpd);
502
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100503 if (!pd) {
Ram Amrania7efd772016-10-10 13:15:33 +0300504 pr_err("Invalid PD received in dealloc_pd\n");
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100505 return -EINVAL;
506 }
Ram Amrania7efd772016-10-10 13:15:33 +0300507
508 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
509 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
510
511 kfree(pd);
512
513 return 0;
514}
515
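/* A PBL (physical buffer list) holds the DMA addresses of the pages backing
 * a queue or MR. It is either a single page of PBEs or a two-level table
 * whose first page points at the pages holding the PBEs.
 */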
516static void qedr_free_pbl(struct qedr_dev *dev,
517 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
518{
519 struct pci_dev *pdev = dev->pdev;
520 int i;
521
522 for (i = 0; i < pbl_info->num_pbls; i++) {
523 if (!pbl[i].va)
524 continue;
525 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
526 pbl[i].va, pbl[i].pa);
527 }
528
529 kfree(pbl);
530}
531
532#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
533#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
534
535#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
536#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
537#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
538
539static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
540 struct qedr_pbl_info *pbl_info,
541 gfp_t flags)
542{
543 struct pci_dev *pdev = dev->pdev;
544 struct qedr_pbl *pbl_table;
545 dma_addr_t *pbl_main_tbl;
546 dma_addr_t pa;
547 void *va;
548 int i;
549
550 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
551 if (!pbl_table)
552 return ERR_PTR(-ENOMEM);
553
554 for (i = 0; i < pbl_info->num_pbls; i++) {
Himanshu Jha7bced912017-12-31 18:01:03 +0530555 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
556 &pa, flags);
Ram Amrania7efd772016-10-10 13:15:33 +0300557 if (!va)
558 goto err;
559
Ram Amrania7efd772016-10-10 13:15:33 +0300560 pbl_table[i].va = va;
561 pbl_table[i].pa = pa;
562 }
563
 564 /* Two-layer PBLs: if we have more than one pbl we need to initialize
565 * the first one with physical pointers to all of the rest
566 */
567 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
568 for (i = 0; i < pbl_info->num_pbls - 1; i++)
569 pbl_main_tbl[i] = pbl_table[i + 1].pa;
570
571 return pbl_table;
572
573err:
574 for (i--; i >= 0; i--)
575 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
576 pbl_table[i].va, pbl_table[i].pa);
577
578 qedr_free_pbl(dev, pbl_info, pbl_table);
579
580 return ERR_PTR(-ENOMEM);
581}
582
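/* Choose the PBL geometry for num_pbes page entries: a single PBL page if
 * they fit, otherwise a two-level table, growing the PBL page size until the
 * second level can cover all of the PBEs.
 */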
583static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
584 struct qedr_pbl_info *pbl_info,
585 u32 num_pbes, int two_layer_capable)
586{
587 u32 pbl_capacity;
588 u32 pbl_size;
589 u32 num_pbls;
590
591 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
592 if (num_pbes > MAX_PBES_TWO_LAYER) {
593 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
594 num_pbes);
595 return -EINVAL;
596 }
597
598 /* calculate required pbl page size */
599 pbl_size = MIN_FW_PBL_PAGE_SIZE;
600 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
601 NUM_PBES_ON_PAGE(pbl_size);
602
603 while (pbl_capacity < num_pbes) {
604 pbl_size *= 2;
605 pbl_capacity = pbl_size / sizeof(u64);
606 pbl_capacity = pbl_capacity * pbl_capacity;
607 }
608
609 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
 610 num_pbls++; /* One for the layer0 (points to the pbls) */
611 pbl_info->two_layered = true;
612 } else {
613 /* One layered PBL */
614 num_pbls = 1;
615 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
616 roundup_pow_of_two((num_pbes * sizeof(u64))));
617 pbl_info->two_layered = false;
618 }
619
620 pbl_info->num_pbls = num_pbls;
621 pbl_info->pbl_size = pbl_size;
622 pbl_info->num_pbes = num_pbes;
623
624 DP_DEBUG(dev, QEDR_MSG_MR,
625 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
626 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
627
628 return 0;
629}
630
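/* Walk the umem scatterlist and write one little-endian PBE per firmware
 * page (1 << pg_shift). A single umem page may expand into several firmware
 * pages, and PBEs spill over into the next PBL page when the current one
 * fills up.
 */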
631static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
632 struct qedr_pbl *pbl,
Ram Amranie57bb6b2017-06-05 16:32:27 +0300633 struct qedr_pbl_info *pbl_info, u32 pg_shift)
Ram Amrania7efd772016-10-10 13:15:33 +0300634{
635 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300636 u32 fw_pg_cnt, fw_pg_per_umem_pg;
Ram Amrania7efd772016-10-10 13:15:33 +0300637 struct qedr_pbl *pbl_tbl;
638 struct scatterlist *sg;
639 struct regpair *pbe;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300640 u64 pg_addr;
Ram Amrania7efd772016-10-10 13:15:33 +0300641 int entry;
Ram Amrania7efd772016-10-10 13:15:33 +0300642
643 if (!pbl_info->num_pbes)
644 return;
645
 646 /* If we have a two-layered pbl, the first pbl points to the rest
 647 * of the pbls and the first entry lies in the second pbl of the table
648 */
649 if (pbl_info->two_layered)
650 pbl_tbl = &pbl[1];
651 else
652 pbl_tbl = pbl;
653
654 pbe = (struct regpair *)pbl_tbl->va;
655 if (!pbe) {
656 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
657 return;
658 }
659
660 pbe_cnt = 0;
661
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +0300662 shift = umem->page_shift;
Ram Amrania7efd772016-10-10 13:15:33 +0300663
Ram Amranie57bb6b2017-06-05 16:32:27 +0300664 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
665
Ram Amrania7efd772016-10-10 13:15:33 +0300666 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
667 pages = sg_dma_len(sg) >> shift;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300668 pg_addr = sg_dma_address(sg);
Ram Amrania7efd772016-10-10 13:15:33 +0300669 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
Ram Amranie57bb6b2017-06-05 16:32:27 +0300670 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
671 pbe->lo = cpu_to_le32(pg_addr);
672 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
Ram Amrania7efd772016-10-10 13:15:33 +0300673
Ram Amranie57bb6b2017-06-05 16:32:27 +0300674 pg_addr += BIT(pg_shift);
675 pbe_cnt++;
676 total_num_pbes++;
677 pbe++;
Ram Amrania7efd772016-10-10 13:15:33 +0300678
Ram Amranie57bb6b2017-06-05 16:32:27 +0300679 if (total_num_pbes == pbl_info->num_pbes)
680 return;
681
682 /* If the given pbl is full storing the pbes,
683 * move to next pbl.
684 */
685 if (pbe_cnt ==
686 (pbl_info->pbl_size / sizeof(u64))) {
687 pbl_tbl++;
688 pbe = (struct regpair *)pbl_tbl->va;
689 pbe_cnt = 0;
690 }
691
692 fw_pg_cnt++;
Ram Amrania7efd772016-10-10 13:15:33 +0300693 }
694 }
695 }
696}
697
698static int qedr_copy_cq_uresp(struct qedr_dev *dev,
699 struct qedr_cq *cq, struct ib_udata *udata)
700{
701 struct qedr_create_cq_uresp uresp;
702 int rc;
703
704 memset(&uresp, 0, sizeof(uresp));
705
706 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
707 uresp.icid = cq->icid;
708
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300709 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrania7efd772016-10-10 13:15:33 +0300710 if (rc)
711 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
712
713 return rc;
714}
715
716static void consume_cqe(struct qedr_cq *cq)
717{
718 if (cq->latest_cqe == cq->toggle_cqe)
719 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
720
721 cq->latest_cqe = qed_chain_consume(&cq->pbl);
722}
723
724static inline int qedr_align_cq_entries(int entries)
725{
726 u64 size, aligned_size;
727
728 /* We allocate an extra entry that we don't report to the FW. */
729 size = (entries + 1) * QEDR_CQE_SIZE;
730 aligned_size = ALIGN(size, PAGE_SIZE);
731
732 return aligned_size / QEDR_CQE_SIZE;
733}
734
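/* Pin a userspace queue buffer and describe it to the firmware: get the
 * umem, size the PBL in firmware-page units and, unless the caller defers it
 * (the iWARP path, where qed supplies the PBL memory), allocate and populate
 * the PBL here.
 */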
735static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
736 struct qedr_dev *dev,
737 struct qedr_userq *q,
738 u64 buf_addr, size_t buf_len,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300739 int access, int dmasync,
740 int alloc_and_init)
Ram Amrania7efd772016-10-10 13:15:33 +0300741{
Ram Amranie57bb6b2017-06-05 16:32:27 +0300742 u32 fw_pages;
Ram Amrania7efd772016-10-10 13:15:33 +0300743 int rc;
744
745 q->buf_addr = buf_addr;
746 q->buf_len = buf_len;
747 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
748 if (IS_ERR(q->umem)) {
749 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
750 PTR_ERR(q->umem));
751 return PTR_ERR(q->umem);
752 }
753
Ram Amranie57bb6b2017-06-05 16:32:27 +0300754 fw_pages = ib_umem_page_count(q->umem) <<
755 (q->umem->page_shift - FW_PAGE_SHIFT);
756
757 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
Ram Amrania7efd772016-10-10 13:15:33 +0300758 if (rc)
759 goto err0;
760
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300761 if (alloc_and_init) {
762 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
763 if (IS_ERR(q->pbl_tbl)) {
764 rc = PTR_ERR(q->pbl_tbl);
765 goto err0;
766 }
Ram Amranie57bb6b2017-06-05 16:32:27 +0300767 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
768 FW_PAGE_SHIFT);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300769 } else {
770 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
Dan Carpenter89fd2572017-08-25 11:18:39 +0300771 if (!q->pbl_tbl) {
772 rc = -ENOMEM;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300773 goto err0;
Dan Carpenter89fd2572017-08-25 11:18:39 +0300774 }
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300775 }
Ram Amrania7efd772016-10-10 13:15:33 +0300776
777 return 0;
778
779err0:
780 ib_umem_release(q->umem);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300781 q->umem = NULL;
Ram Amrania7efd772016-10-10 13:15:33 +0300782
783 return rc;
784}
785
786static inline void qedr_init_cq_params(struct qedr_cq *cq,
787 struct qedr_ucontext *ctx,
788 struct qedr_dev *dev, int vector,
789 int chain_entries, int page_cnt,
790 u64 pbl_ptr,
791 struct qed_rdma_create_cq_in_params
792 *params)
793{
794 memset(params, 0, sizeof(*params));
795 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
796 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
797 params->cnq_id = vector;
798 params->cq_size = chain_entries - 1;
799 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
800 params->pbl_num_pages = page_cnt;
801 params->pbl_ptr = pbl_ptr;
802 params->pbl_two_level = 0;
803}
804
805static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
806{
807 /* Flush data before signalling doorbell */
808 wmb();
809 cq->db.data.agg_flags = flags;
810 cq->db.data.value = cpu_to_le32(cons);
811 writeq(cq->db.raw, cq->db_addr);
812
813 /* Make sure write would stick */
814 mmiowb();
815}
816
817int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
818{
819 struct qedr_cq *cq = get_qedr_cq(ibcq);
820 unsigned long sflags;
Amrani, Ram4dd72632017-04-27 13:35:34 +0300821 struct qedr_dev *dev;
822
823 dev = get_qedr_dev(ibcq->device);
824
825 if (cq->destroyed) {
826 DP_ERR(dev,
827 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
828 cq, cq->icid);
829 return -EINVAL;
830 }
831
Ram Amrania7efd772016-10-10 13:15:33 +0300832
833 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
834 return 0;
835
836 spin_lock_irqsave(&cq->cq_lock, sflags);
837
838 cq->arm_flags = 0;
839
840 if (flags & IB_CQ_SOLICITED)
841 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
842
843 if (flags & IB_CQ_NEXT_COMP)
844 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
845
846 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
847
848 spin_unlock_irqrestore(&cq->cq_lock, sflags);
849
850 return 0;
851}
852
853struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
854 const struct ib_cq_init_attr *attr,
855 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
856{
857 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
858 struct qed_rdma_destroy_cq_out_params destroy_oparams;
859 struct qed_rdma_destroy_cq_in_params destroy_iparams;
860 struct qedr_dev *dev = get_qedr_dev(ibdev);
861 struct qed_rdma_create_cq_in_params params;
862 struct qedr_create_cq_ureq ureq;
863 int vector = attr->comp_vector;
864 int entries = attr->cqe;
865 struct qedr_cq *cq;
866 int chain_entries;
867 int page_cnt;
868 u64 pbl_ptr;
869 u16 icid;
870 int rc;
871
872 DP_DEBUG(dev, QEDR_MSG_INIT,
873 "create_cq: called from %s. entries=%d, vector=%d\n",
874 udata ? "User Lib" : "Kernel", entries, vector);
875
876 if (entries > QEDR_MAX_CQES) {
877 DP_ERR(dev,
878 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
879 entries, QEDR_MAX_CQES);
880 return ERR_PTR(-EINVAL);
881 }
882
883 chain_entries = qedr_align_cq_entries(entries);
884 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
885
886 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
887 if (!cq)
888 return ERR_PTR(-ENOMEM);
889
890 if (udata) {
891 memset(&ureq, 0, sizeof(ureq));
892 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
893 DP_ERR(dev,
894 "create cq: problem copying data from user space\n");
895 goto err0;
896 }
897
898 if (!ureq.len) {
899 DP_ERR(dev,
900 "create cq: cannot create a cq with 0 entries\n");
901 goto err0;
902 }
903
904 cq->cq_type = QEDR_CQ_TYPE_USER;
905
906 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300907 ureq.len, IB_ACCESS_LOCAL_WRITE,
908 1, 1);
Ram Amrania7efd772016-10-10 13:15:33 +0300909 if (rc)
910 goto err0;
911
912 pbl_ptr = cq->q.pbl_tbl->pa;
913 page_cnt = cq->q.pbl_info.num_pbes;
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200914
915 cq->ibcq.cqe = chain_entries;
Ram Amrania7efd772016-10-10 13:15:33 +0300916 } else {
917 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
918
919 rc = dev->ops->common->chain_alloc(dev->cdev,
920 QED_CHAIN_USE_TO_CONSUME,
921 QED_CHAIN_MODE_PBL,
922 QED_CHAIN_CNT_TYPE_U32,
923 chain_entries,
924 sizeof(union rdma_cqe),
Mintz, Yuval1a4a6972017-06-20 16:00:00 +0300925 &cq->pbl, NULL);
Ram Amrania7efd772016-10-10 13:15:33 +0300926 if (rc)
927 goto err1;
928
929 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
930 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200931 cq->ibcq.cqe = cq->pbl.capacity;
Ram Amrania7efd772016-10-10 13:15:33 +0300932 }
933
934 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
935 pbl_ptr, &params);
936
937 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
938 if (rc)
939 goto err2;
940
941 cq->icid = icid;
942 cq->sig = QEDR_CQ_MAGIC_NUMBER;
943 spin_lock_init(&cq->cq_lock);
944
945 if (ib_ctx) {
946 rc = qedr_copy_cq_uresp(dev, cq, udata);
947 if (rc)
948 goto err3;
949 } else {
950 /* Generate doorbell address. */
951 cq->db_addr = dev->db_addr +
952 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
953 cq->db.data.icid = cq->icid;
954 cq->db.data.params = DB_AGG_CMD_SET <<
955 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
956
957 /* point to the very last element, passing it we will toggle */
958 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
959 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
960 cq->latest_cqe = NULL;
961 consume_cqe(cq);
962 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
963 }
964
965 DP_DEBUG(dev, QEDR_MSG_CQ,
966 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
967 cq->icid, cq, params.cq_size);
968
969 return &cq->ibcq;
970
971err3:
972 destroy_iparams.icid = cq->icid;
973 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
974 &destroy_oparams);
975err2:
976 if (udata)
977 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
978 else
979 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
980err1:
981 if (udata)
982 ib_umem_release(cq->q.umem);
983err0:
984 kfree(cq);
985 return ERR_PTR(-EINVAL);
986}
987
988int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
989{
990 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
991 struct qedr_cq *cq = get_qedr_cq(ibcq);
992
993 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
994
995 return 0;
996}
997
Amrani, Ram4dd72632017-04-27 13:35:34 +0300998#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
999#define QEDR_DESTROY_CQ_ITER_DURATION (10)
1000
Ram Amrania7efd772016-10-10 13:15:33 +03001001int qedr_destroy_cq(struct ib_cq *ibcq)
1002{
1003 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1004 struct qed_rdma_destroy_cq_out_params oparams;
1005 struct qed_rdma_destroy_cq_in_params iparams;
1006 struct qedr_cq *cq = get_qedr_cq(ibcq);
Amrani, Ram4dd72632017-04-27 13:35:34 +03001007 int iter;
Amrani, Ram942b3b22017-04-27 13:35:33 +03001008 int rc;
Ram Amrania7efd772016-10-10 13:15:33 +03001009
Amrani, Ram942b3b22017-04-27 13:35:33 +03001010 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
Ram Amrania7efd772016-10-10 13:15:33 +03001011
Amrani, Ram4dd72632017-04-27 13:35:34 +03001012 cq->destroyed = 1;
1013
Ram Amrania7efd772016-10-10 13:15:33 +03001014 /* GSIs CQs are handled by driver, so they don't exist in the FW */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001015 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1016 goto done;
Amrani, Rama1211352016-12-22 14:40:34 +02001017
Amrani, Ram942b3b22017-04-27 13:35:33 +03001018 iparams.icid = cq->icid;
1019 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1020 if (rc)
1021 return rc;
1022
1023 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
Ram Amrania7efd772016-10-10 13:15:33 +03001024
1025 if (ibcq->uobject && ibcq->uobject->context) {
1026 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1027 ib_umem_release(cq->q.umem);
1028 }
1029
Amrani, Ram4dd72632017-04-27 13:35:34 +03001030 /* We don't want the IRQ handler to handle a non-existing CQ so we
1031 * wait until all CNQ interrupts, if any, are received. This will always
1032 * happen and will always happen very fast. If not, then a serious error
 1033 * has occurred. That is why we can use a long delay.
 1034 * We spin for a short time so we don't lose time on context switching
1035 * in case all the completions are handled in that span. Otherwise
1036 * we sleep for a while and check again. Since the CNQ may be
1037 * associated with (only) the current CPU we use msleep to allow the
1038 * current CPU to be freed.
1039 * The CNQ notification is increased in qedr_irq_handler().
1040 */
1041 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1042 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1043 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1044 iter--;
1045 }
1046
1047 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1048 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1049 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1050 iter--;
1051 }
1052
1053 if (oparams.num_cq_notif != cq->cnq_notif)
1054 goto err;
1055
1056 /* Note that we don't need to have explicit code to wait for the
1057 * completion of the event handler because it is invoked from the EQ.
1058 * Since the destroy CQ ramrod has also been received on the EQ we can
1059 * be certain that there's no event handler in process.
1060 */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001061done:
Amrani, Ram4dd72632017-04-27 13:35:34 +03001062 cq->sig = ~cq->sig;
1063
Ram Amrania7efd772016-10-10 13:15:33 +03001064 kfree(cq);
1065
1066 return 0;
Amrani, Ram4dd72632017-04-27 13:35:34 +03001067
1068err:
1069 DP_ERR(dev,
1070 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1071 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1072
1073 return -EINVAL;
Ram Amrania7efd772016-10-10 13:15:33 +03001074}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001075
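/* Resolve the source GID referenced by the AH attribute and derive the
 * RoCE flavour (v1, v2/IPv4 or v2/IPv6), the GID pair and the VLAN for the
 * qed modify-QP request.
 */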
1076static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1077 struct ib_qp_attr *attr,
1078 int attr_mask,
1079 struct qed_rdma_modify_qp_in_params
1080 *qp_params)
1081{
1082 enum rdma_network_type nw_type;
1083 struct ib_gid_attr gid_attr;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001084 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001085 union ib_gid gid;
1086 u32 ipv4_addr;
1087 int rc = 0;
1088 int i;
1089
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001090 rc = ib_get_cached_gid(ibqp->device,
1091 rdma_ah_get_port_num(&attr->ah_attr),
1092 grh->sgid_index, &gid, &gid_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001093 if (rc)
1094 return rc;
1095
Parav Pandit3e44e0e2018-04-01 15:08:23 +03001096 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001097
Parav Pandit3e44e0e2018-04-01 15:08:23 +03001098 dev_put(gid_attr.ndev);
1099 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1100 switch (nw_type) {
1101 case RDMA_NETWORK_IPV6:
1102 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1103 sizeof(qp_params->sgid));
1104 memcpy(&qp_params->dgid.bytes[0],
1105 &grh->dgid,
1106 sizeof(qp_params->dgid));
1107 qp_params->roce_mode = ROCE_V2_IPV6;
1108 SET_FIELD(qp_params->modify_flags,
1109 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1110 break;
1111 case RDMA_NETWORK_IB:
1112 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1113 sizeof(qp_params->sgid));
1114 memcpy(&qp_params->dgid.bytes[0],
1115 &grh->dgid,
1116 sizeof(qp_params->dgid));
1117 qp_params->roce_mode = ROCE_V1;
1118 break;
1119 case RDMA_NETWORK_IPV4:
1120 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1121 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1122 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1123 qp_params->sgid.ipv4_addr = ipv4_addr;
1124 ipv4_addr =
1125 qedr_get_ipv4_from_gid(grh->dgid.raw);
1126 qp_params->dgid.ipv4_addr = ipv4_addr;
1127 SET_FIELD(qp_params->modify_flags,
1128 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1129 qp_params->roce_mode = ROCE_V2_IPV4;
1130 break;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001131 }
1132
1133 for (i = 0; i < 4; i++) {
1134 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1135 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1136 }
1137
1138 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1139 qp_params->vlan_id = 0;
1140
1141 return 0;
1142}
1143
Ram Amranicecbcdd2016-10-10 13:15:34 +03001144static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1145 struct ib_qp_init_attr *attrs)
1146{
1147 struct qedr_device_attr *qattr = &dev->attr;
1148
1149 /* QP0... attrs->qp_type == IB_QPT_GSI */
1150 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1151 DP_DEBUG(dev, QEDR_MSG_QP,
1152 "create qp: unsupported qp type=0x%x requested\n",
1153 attrs->qp_type);
1154 return -EINVAL;
1155 }
1156
1157 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1158 DP_ERR(dev,
1159 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1160 attrs->cap.max_send_wr, qattr->max_sqe);
1161 return -EINVAL;
1162 }
1163
1164 if (attrs->cap.max_inline_data > qattr->max_inline) {
1165 DP_ERR(dev,
1166 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1167 attrs->cap.max_inline_data, qattr->max_inline);
1168 return -EINVAL;
1169 }
1170
1171 if (attrs->cap.max_send_sge > qattr->max_sge) {
1172 DP_ERR(dev,
1173 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1174 attrs->cap.max_send_sge, qattr->max_sge);
1175 return -EINVAL;
1176 }
1177
1178 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1179 DP_ERR(dev,
1180 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1181 attrs->cap.max_recv_sge, qattr->max_sge);
1182 return -EINVAL;
1183 }
1184
1185 /* Unprivileged user space cannot create special QP */
1186 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1187 DP_ERR(dev,
1188 "create qp: userspace can't create special QPs of type=0x%x\n",
1189 attrs->qp_type);
1190 return -EINVAL;
1191 }
1192
1193 return 0;
1194}
1195
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001196static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1197 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001198 struct qedr_qp *qp)
1199{
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001200 /* iWARP requires two doorbells per RQ. */
1201 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1202 uresp->rq_db_offset =
1203 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1204 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1205 } else {
1206 uresp->rq_db_offset =
1207 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1208 }
1209
Ram Amranicecbcdd2016-10-10 13:15:34 +03001210 uresp->rq_icid = qp->icid;
1211}
1212
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001213static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1214 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001215 struct qedr_qp *qp)
1216{
1217 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001218
1219 /* iWARP uses the same cid for rq and sq */
1220 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1221 uresp->sq_icid = qp->icid;
1222 else
1223 uresp->sq_icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001224}
1225
1226static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1227 struct qedr_qp *qp, struct ib_udata *udata)
1228{
1229 struct qedr_create_qp_uresp uresp;
1230 int rc;
1231
1232 memset(&uresp, 0, sizeof(uresp));
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001233 qedr_copy_sq_uresp(dev, &uresp, qp);
1234 qedr_copy_rq_uresp(dev, &uresp, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001235
1236 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1237 uresp.qp_id = qp->qp_id;
1238
Amrani, Ramc75d3ec2017-06-26 19:05:04 +03001239 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amranicecbcdd2016-10-10 13:15:34 +03001240 if (rc)
1241 DP_ERR(dev,
1242 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1243 qp->icid);
1244
1245 return rc;
1246}
1247
Amrani, Ramdf158562016-12-22 14:52:24 +02001248static void qedr_set_common_qp_params(struct qedr_dev *dev,
1249 struct qedr_qp *qp,
1250 struct qedr_pd *pd,
1251 struct ib_qp_init_attr *attrs)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001252{
Ram Amranicecbcdd2016-10-10 13:15:34 +03001253 spin_lock_init(&qp->q_lock);
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001254 atomic_set(&qp->refcnt, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001255 qp->pd = pd;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001256 qp->qp_type = attrs->qp_type;
1257 qp->max_inline_data = attrs->cap.max_inline_data;
1258 qp->sq.max_sges = attrs->cap.max_send_sge;
1259 qp->state = QED_ROCE_QP_STATE_RESET;
1260 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1261 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1262 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1263 qp->dev = dev;
Amrani, Ramdf158562016-12-22 14:52:24 +02001264 qp->rq.max_sges = attrs->cap.max_recv_sge;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001265
1266 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001267 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1268 qp->rq.max_sges, qp->rq_cq->icid);
1269 DP_DEBUG(dev, QEDR_MSG_QP,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001270 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1271 pd->pd_id, qp->qp_type, qp->max_inline_data,
1272 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1273 DP_DEBUG(dev, QEDR_MSG_QP,
1274 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1275 qp->sq.max_sges, qp->sq_cq->icid);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001276}
1277
Amrani, Ramdf158562016-12-22 14:52:24 +02001278static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001279{
1280 qp->sq.db = dev->db_addr +
1281 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1282 qp->sq.db_data.data.icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001283 qp->rq.db = dev->db_addr +
1284 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1285 qp->rq.db_data.data.icid = qp->icid;
1286}
1287
Amrani, Ramdf158562016-12-22 14:52:24 +02001288static inline void
1289qedr_init_common_qp_in_params(struct qedr_dev *dev,
1290 struct qedr_pd *pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001291 struct qedr_qp *qp,
1292 struct ib_qp_init_attr *attrs,
Amrani, Ramdf158562016-12-22 14:52:24 +02001293 bool fmr_and_reserved_lkey,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001294 struct qed_rdma_create_qp_in_params *params)
1295{
Amrani, Ramdf158562016-12-22 14:52:24 +02001296 /* QP handle to be written in an async event */
1297 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1298 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001299
Amrani, Ramdf158562016-12-22 14:52:24 +02001300 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1301 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1302 params->pd = pd->pd_id;
1303 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1304 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1305 params->stats_queue = 0;
1306 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1307 params->srq_id = 0;
1308 params->use_srq = false;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001309}
1310
Amrani, Ramdf158562016-12-22 14:52:24 +02001311static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001312{
Amrani, Ramdf158562016-12-22 14:52:24 +02001313 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1314 "qp=%p. "
1315 "sq_addr=0x%llx, "
1316 "sq_len=%zd, "
1317 "rq_addr=0x%llx, "
1318 "rq_len=%zd"
1319 "\n",
1320 qp,
1321 qp->usq.buf_addr,
1322 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1323}
1324
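/* Only iWARP QPs are tracked in the idr (keyed by QP id), so that they can
 * later be looked up by id, e.g. from the iWARP CM paths; for RoCE this is
 * a no-op.
 */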
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001325static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
1326{
1327 int rc;
1328
1329 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1330 return 0;
1331
1332 idr_preload(GFP_KERNEL);
1333 spin_lock_irq(&dev->idr_lock);
1334
1335 rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
1336
1337 spin_unlock_irq(&dev->idr_lock);
1338 idr_preload_end();
1339
1340 return rc < 0 ? rc : 0;
1341}
1342
1343static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
1344{
1345 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1346 return;
1347
1348 spin_lock_irq(&dev->idr_lock);
1349 idr_remove(&dev->qpidr, id);
1350 spin_unlock_irq(&dev->idr_lock);
1351}
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001352
1353static inline void
1354qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1355 struct qedr_qp *qp,
1356 struct qed_rdma_create_qp_out_params *out_params)
1357{
1358 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1359 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1360
1361 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1362 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1363
1364 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1365 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1366
1367 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1368 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1369}
1370
Amrani, Ramdf158562016-12-22 14:52:24 +02001371static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1372{
1373 if (qp->usq.umem)
1374 ib_umem_release(qp->usq.umem);
1375 qp->usq.umem = NULL;
1376
1377 if (qp->urq.umem)
1378 ib_umem_release(qp->urq.umem);
1379 qp->urq.umem = NULL;
1380}
1381
1382static int qedr_create_user_qp(struct qedr_dev *dev,
1383 struct qedr_qp *qp,
1384 struct ib_pd *ibpd,
1385 struct ib_udata *udata,
1386 struct ib_qp_init_attr *attrs)
1387{
1388 struct qed_rdma_create_qp_in_params in_params;
1389 struct qed_rdma_create_qp_out_params out_params;
1390 struct qedr_pd *pd = get_qedr_pd(ibpd);
1391 struct ib_ucontext *ib_ctx = NULL;
Amrani, Ramdf158562016-12-22 14:52:24 +02001392 struct qedr_create_qp_ureq ureq;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001393 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001394 int rc = -EINVAL;
1395
1396 ib_ctx = ibpd->uobject->context;
Amrani, Ramdf158562016-12-22 14:52:24 +02001397
1398 memset(&ureq, 0, sizeof(ureq));
1399 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1400 if (rc) {
1401 DP_ERR(dev, "Problem copying data from user space\n");
1402 return rc;
1403 }
1404
1405 /* SQ - read access only (0), dma sync not required (0) */
1406 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001407 ureq.sq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001408 if (rc)
1409 return rc;
1410
1411 /* RQ - read access only (0), dma sync not required (0) */
1412 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001413 ureq.rq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001414 if (rc)
1415 return rc;
1416
1417 memset(&in_params, 0, sizeof(in_params));
1418 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1419 in_params.qp_handle_lo = ureq.qp_handle_lo;
1420 in_params.qp_handle_hi = ureq.qp_handle_hi;
1421 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1422 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1423 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1424 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1425
1426 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1427 &in_params, &out_params);
1428
1429 if (!qp->qed_qp) {
1430 rc = -ENOMEM;
1431 goto err1;
1432 }
1433
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001434 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1435 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1436
Amrani, Ramdf158562016-12-22 14:52:24 +02001437 qp->qp_id = out_params.qp_id;
1438 qp->icid = out_params.icid;
1439
1440 rc = qedr_copy_qp_uresp(dev, qp, udata);
1441 if (rc)
1442 goto err;
1443
1444 qedr_qp_user_print(dev, qp);
1445
1446 return 0;
1447err:
1448 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1449 if (rc)
1450 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1451
1452err1:
1453 qedr_cleanup_user(dev, qp);
1454 return rc;
1455}
1456
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001457static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1458{
1459 qp->sq.db = dev->db_addr +
1460 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1461 qp->sq.db_data.data.icid = qp->icid;
1462
1463 qp->rq.db = dev->db_addr +
1464 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1465 qp->rq.db_data.data.icid = qp->icid;
1466 qp->rq.iwarp_db2 = dev->db_addr +
1467 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1468 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1469 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1470}
1471
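/* RoCE kernel QP: the driver allocates the SQ and RQ chains (PBL mode)
 * itself and passes their page counts and PBL addresses to qed when
 * creating the QP.
 */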
Amrani, Ramdf158562016-12-22 14:52:24 +02001472static int
1473qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1474 struct qedr_qp *qp,
1475 struct qed_rdma_create_qp_in_params *in_params,
1476 u32 n_sq_elems, u32 n_rq_elems)
1477{
1478 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001479 int rc;
1480
Ram Amranicecbcdd2016-10-10 13:15:34 +03001481 rc = dev->ops->common->chain_alloc(dev->cdev,
1482 QED_CHAIN_USE_TO_PRODUCE,
1483 QED_CHAIN_MODE_PBL,
1484 QED_CHAIN_CNT_TYPE_U32,
1485 n_sq_elems,
1486 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001487 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001488
1489 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001490 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001491
Amrani, Ramdf158562016-12-22 14:52:24 +02001492 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1493 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001494
Ram Amranicecbcdd2016-10-10 13:15:34 +03001495 rc = dev->ops->common->chain_alloc(dev->cdev,
1496 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1497 QED_CHAIN_MODE_PBL,
1498 QED_CHAIN_CNT_TYPE_U32,
1499 n_rq_elems,
1500 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001501 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001502 if (rc)
1503 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001504
Amrani, Ramdf158562016-12-22 14:52:24 +02001505 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1506 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001507
Amrani, Ramdf158562016-12-22 14:52:24 +02001508 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1509 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001510
Amrani, Ramdf158562016-12-22 14:52:24 +02001511 if (!qp->qed_qp)
1512 return -EINVAL;
1513
1514 qp->qp_id = out_params.qp_id;
1515 qp->icid = out_params.icid;
1516
1517 qedr_set_roce_db_info(dev, qp);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001518 return rc;
1519}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001520
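/* iWARP kernel QP: qed is asked to create the QP first and returns the PBL
 * memory it allocated; the SQ/RQ chains are then built on top of those
 * external PBLs (ext_pbl).
 */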
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001521static int
1522qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1523 struct qedr_qp *qp,
1524 struct qed_rdma_create_qp_in_params *in_params,
1525 u32 n_sq_elems, u32 n_rq_elems)
1526{
1527 struct qed_rdma_create_qp_out_params out_params;
1528 struct qed_chain_ext_pbl ext_pbl;
1529 int rc;
1530
1531 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1532 QEDR_SQE_ELEMENT_SIZE,
1533 QED_CHAIN_MODE_PBL);
1534 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1535 QEDR_RQE_ELEMENT_SIZE,
1536 QED_CHAIN_MODE_PBL);
1537
1538 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1539 in_params, &out_params);
1540
1541 if (!qp->qed_qp)
1542 return -EINVAL;
1543
1544 /* Now we allocate the chain */
1545 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1546 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1547
1548 rc = dev->ops->common->chain_alloc(dev->cdev,
1549 QED_CHAIN_USE_TO_PRODUCE,
1550 QED_CHAIN_MODE_PBL,
1551 QED_CHAIN_CNT_TYPE_U32,
1552 n_sq_elems,
1553 QEDR_SQE_ELEMENT_SIZE,
1554 &qp->sq.pbl, &ext_pbl);
1555
1556 if (rc)
1557 goto err;
1558
1559 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1560 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1561
1562 rc = dev->ops->common->chain_alloc(dev->cdev,
1563 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1564 QED_CHAIN_MODE_PBL,
1565 QED_CHAIN_CNT_TYPE_U32,
1566 n_rq_elems,
1567 QEDR_RQE_ELEMENT_SIZE,
1568 &qp->rq.pbl, &ext_pbl);
1569
1570 if (rc)
1571 goto err;
1572
1573 qp->qp_id = out_params.qp_id;
1574 qp->icid = out_params.icid;
1575
1576 qedr_set_iwarp_db_info(dev, qp);
1577 return rc;
1578
1579err:
1580 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1581
1582 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001583}
1584
Amrani, Ramdf158562016-12-22 14:52:24 +02001585static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001586{
Amrani, Ramdf158562016-12-22 14:52:24 +02001587 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1588 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001589
Amrani, Ramdf158562016-12-22 14:52:24 +02001590 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1591 kfree(qp->rqe_wr_id);
1592}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001593
Amrani, Ramdf158562016-12-22 14:52:24 +02001594static int qedr_create_kernel_qp(struct qedr_dev *dev,
1595 struct qedr_qp *qp,
1596 struct ib_pd *ibpd,
1597 struct ib_qp_init_attr *attrs)
1598{
1599 struct qed_rdma_create_qp_in_params in_params;
1600 struct qedr_pd *pd = get_qedr_pd(ibpd);
1601 int rc = -EINVAL;
1602 u32 n_rq_elems;
1603 u32 n_sq_elems;
1604 u32 n_sq_entries;
1605
1606 memset(&in_params, 0, sizeof(in_params));
1607
1608 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1609 * the ring. The ring should allow at least a single WR, even if the
1610 * user requested none, due to allocation issues.
1611 * We should add an extra WR since the prod and cons indices of
1612 * wqe_wr_id are managed in such a way that the WQ is considered full
1613 * when (prod+1)%max_wr==cons. We currently don't do that because we
 1614 * double the number of entries due to an iSER issue that pushes far more
1615 * WRs than indicated. If we decline its ib_post_send() then we get
1616 * error prints in the dmesg we'd like to avoid.
1617 */
1618 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1619 dev->attr.max_sqe);
1620
1621 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1622 GFP_KERNEL);
1623 if (!qp->wqe_wr_id) {
1624 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1625 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001626 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001627
Amrani, Ramdf158562016-12-22 14:52:24 +02001628 /* QP handle to be written in CQE */
1629 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1630 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001631
Amrani, Ramdf158562016-12-22 14:52:24 +02001632 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
 1633 * the ring. The ring should allow at least a single WR, even if the
1634 * user requested none, due to allocation issues.
1635 */
1636 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1637
1638 /* Allocate driver internal RQ array */
1639 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1640 GFP_KERNEL);
1641 if (!qp->rqe_wr_id) {
1642 DP_ERR(dev,
1643 "create qp: failed RQ shadow memory allocation\n");
1644 kfree(qp->wqe_wr_id);
1645 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001646 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001647
Amrani, Ramdf158562016-12-22 14:52:24 +02001648 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001649
Amrani, Ramdf158562016-12-22 14:52:24 +02001650 n_sq_entries = attrs->cap.max_send_wr;
1651 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1652 n_sq_entries = max_t(u32, n_sq_entries, 1);
1653 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001654
Amrani, Ramdf158562016-12-22 14:52:24 +02001655 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1656
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001657 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1658 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1659 n_sq_elems, n_rq_elems);
1660 else
1661 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1662 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001663 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001664 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001665
1666 return rc;
1667}
1668
1669struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1670 struct ib_qp_init_attr *attrs,
1671 struct ib_udata *udata)
1672{
1673 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001674 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001675 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001676 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001677 int rc = 0;
1678
1679 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1680 udata ? "user library" : "kernel", pd);
1681
1682 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1683 if (rc)
1684 return ERR_PTR(rc);
1685
Wei Yongjun181d8012016-10-28 16:33:47 +00001686 if (attrs->srq)
1687 return ERR_PTR(-EINVAL);
1688
Ram Amranicecbcdd2016-10-10 13:15:34 +03001689 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001690		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1691 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001692 get_qedr_cq(attrs->send_cq),
1693 get_qedr_cq(attrs->send_cq)->icid,
1694 get_qedr_cq(attrs->recv_cq),
1695 get_qedr_cq(attrs->recv_cq)->icid);
1696
Amrani, Ramdf158562016-12-22 14:52:24 +02001697 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1698 if (!qp) {
1699 DP_ERR(dev, "create qp: failed allocating memory\n");
1700 return ERR_PTR(-ENOMEM);
1701 }
1702
1703 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001704
Ram Amrani04886772016-10-10 13:15:38 +03001705 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001706 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1707 if (IS_ERR(ibqp))
1708 kfree(qp);
1709 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001710 }
1711
Amrani, Ramdf158562016-12-22 14:52:24 +02001712 if (udata)
1713 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1714 else
1715 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001716
Amrani, Ramdf158562016-12-22 14:52:24 +02001717 if (rc)
1718 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001719
Ram Amranicecbcdd2016-10-10 13:15:34 +03001720 qp->ibqp.qp_num = qp->qp_id;
1721
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001722 rc = qedr_idr_add(dev, qp, qp->qp_id);
1723 if (rc)
1724 goto err;
1725
Ram Amranicecbcdd2016-10-10 13:15:34 +03001726 return &qp->ibqp;
1727
Amrani, Ramdf158562016-12-22 14:52:24 +02001728err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001729 kfree(qp);
1730
1731 return ERR_PTR(-EFAULT);
1732}
1733
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001734static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001735{
1736 switch (qp_state) {
1737 case QED_ROCE_QP_STATE_RESET:
1738 return IB_QPS_RESET;
1739 case QED_ROCE_QP_STATE_INIT:
1740 return IB_QPS_INIT;
1741 case QED_ROCE_QP_STATE_RTR:
1742 return IB_QPS_RTR;
1743 case QED_ROCE_QP_STATE_RTS:
1744 return IB_QPS_RTS;
1745 case QED_ROCE_QP_STATE_SQD:
1746 return IB_QPS_SQD;
1747 case QED_ROCE_QP_STATE_ERR:
1748 return IB_QPS_ERR;
1749 case QED_ROCE_QP_STATE_SQE:
1750 return IB_QPS_SQE;
1751 }
1752 return IB_QPS_ERR;
1753}
1754
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001755static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1756 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001757{
1758 switch (qp_state) {
1759 case IB_QPS_RESET:
1760 return QED_ROCE_QP_STATE_RESET;
1761 case IB_QPS_INIT:
1762 return QED_ROCE_QP_STATE_INIT;
1763 case IB_QPS_RTR:
1764 return QED_ROCE_QP_STATE_RTR;
1765 case IB_QPS_RTS:
1766 return QED_ROCE_QP_STATE_RTS;
1767 case IB_QPS_SQD:
1768 return QED_ROCE_QP_STATE_SQD;
1769 case IB_QPS_ERR:
1770 return QED_ROCE_QP_STATE_ERR;
1771 default:
1772 return QED_ROCE_QP_STATE_ERR;
1773 }
1774}
1775
1776static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1777{
1778 qed_chain_reset(&qph->pbl);
1779 qph->prod = 0;
1780 qph->cons = 0;
1781 qph->wqe_cons = 0;
1782 qph->db_data.data.value = cpu_to_le16(0);
1783}
1784
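/* Validate and apply a RoCE QP state transition. The nested switch below
 * encodes the legal transitions (RESET->INIT, INIT->RTR/ERR, RTR->RTS/ERR,
 * RTS->SQD/ERR, SQD->RTS/ERR, ERR->RESET); anything else fails with -EINVAL.
 * On INIT->RTR the RQ doorbell is rung (RoCE only), in case receives were
 * posted before the move to RTR.
 */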
1785static int qedr_update_qp_state(struct qedr_dev *dev,
1786 struct qedr_qp *qp,
1787 enum qed_roce_qp_state new_state)
1788{
1789 int status = 0;
1790
1791 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001792 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001793
1794 switch (qp->state) {
1795 case QED_ROCE_QP_STATE_RESET:
1796 switch (new_state) {
1797 case QED_ROCE_QP_STATE_INIT:
1798 qp->prev_wqe_size = 0;
1799 qedr_reset_qp_hwq_info(&qp->sq);
1800 qedr_reset_qp_hwq_info(&qp->rq);
1801 break;
1802 default:
1803 status = -EINVAL;
1804 break;
1805 };
1806 break;
1807 case QED_ROCE_QP_STATE_INIT:
1808 switch (new_state) {
1809 case QED_ROCE_QP_STATE_RTR:
1810 /* Update doorbell (in case post_recv was
1811 * done before move to RTR)
1812 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001813
1814 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1815 wmb();
Sinan Kaya561e5d482018-03-13 23:20:24 -04001816 writel_relaxed(qp->rq.db_data.raw, qp->rq.db);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001817 /* Make sure write takes effect */
1818 mmiowb();
1819 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001820 break;
1821 case QED_ROCE_QP_STATE_ERR:
1822 break;
1823 default:
1824 /* Invalid state change. */
1825 status = -EINVAL;
1826 break;
1827 };
1828 break;
1829 case QED_ROCE_QP_STATE_RTR:
1830 /* RTR->XXX */
1831 switch (new_state) {
1832 case QED_ROCE_QP_STATE_RTS:
1833 break;
1834 case QED_ROCE_QP_STATE_ERR:
1835 break;
1836 default:
1837 /* Invalid state change. */
1838 status = -EINVAL;
1839 break;
1840 };
1841 break;
1842 case QED_ROCE_QP_STATE_RTS:
1843 /* RTS->XXX */
1844 switch (new_state) {
1845 case QED_ROCE_QP_STATE_SQD:
1846 break;
1847 case QED_ROCE_QP_STATE_ERR:
1848 break;
1849 default:
1850 /* Invalid state change. */
1851 status = -EINVAL;
1852 break;
1853 };
1854 break;
1855 case QED_ROCE_QP_STATE_SQD:
1856 /* SQD->XXX */
1857 switch (new_state) {
1858 case QED_ROCE_QP_STATE_RTS:
1859 case QED_ROCE_QP_STATE_ERR:
1860 break;
1861 default:
1862 /* Invalid state change. */
1863 status = -EINVAL;
1864 break;
1865 };
1866 break;
1867 case QED_ROCE_QP_STATE_ERR:
1868 /* ERR->XXX */
1869 switch (new_state) {
1870 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001871 if ((qp->rq.prod != qp->rq.cons) ||
1872 (qp->sq.prod != qp->sq.cons)) {
1873 DP_NOTICE(dev,
1874 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1875 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1876 qp->sq.cons);
1877 status = -EINVAL;
1878 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001879 break;
1880 default:
1881 status = -EINVAL;
1882 break;
1883 };
1884 break;
1885 default:
1886 status = -EINVAL;
1887 break;
1888 };
1889
1890 return status;
1891}
1892
1893int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1894 int attr_mask, struct ib_udata *udata)
1895{
1896 struct qedr_qp *qp = get_qedr_qp(ibqp);
1897 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1898 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001899 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001900 enum ib_qp_state old_qp_state, new_qp_state;
1901 int rc = 0;
1902
1903 DP_DEBUG(dev, QEDR_MSG_QP,
1904 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1905 attr->qp_state);
1906
1907 old_qp_state = qedr_get_ibqp_state(qp->state);
1908 if (attr_mask & IB_QP_STATE)
1909 new_qp_state = attr->qp_state;
1910 else
1911 new_qp_state = old_qp_state;
1912
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001913 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1914 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1915 ibqp->qp_type, attr_mask,
1916 IB_LINK_LAYER_ETHERNET)) {
1917 DP_ERR(dev,
1918 "modify qp: invalid attribute mask=0x%x specified for\n"
1919 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1920 attr_mask, qp->qp_id, ibqp->qp_type,
1921 old_qp_state, new_qp_state);
1922 rc = -EINVAL;
1923 goto err;
1924 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001925 }
1926
1927 /* Translate the masks... */
1928 if (attr_mask & IB_QP_STATE) {
1929 SET_FIELD(qp_params.modify_flags,
1930 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1931 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1932 }
1933
1934 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1935 qp_params.sqd_async = true;
1936
1937 if (attr_mask & IB_QP_PKEY_INDEX) {
1938 SET_FIELD(qp_params.modify_flags,
1939 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1940 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1941 rc = -EINVAL;
1942 goto err;
1943 }
1944
1945 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1946 }
1947
1948 if (attr_mask & IB_QP_QKEY)
1949 qp->qkey = attr->qkey;
1950
1951 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1952 SET_FIELD(qp_params.modify_flags,
1953 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1954 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1955 IB_ACCESS_REMOTE_READ;
1956 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1957 IB_ACCESS_REMOTE_WRITE;
1958 qp_params.incoming_atomic_en = attr->qp_access_flags &
1959 IB_ACCESS_REMOTE_ATOMIC;
1960 }
1961
1962 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1963 if (attr_mask & IB_QP_PATH_MTU) {
1964 if (attr->path_mtu < IB_MTU_256 ||
1965 attr->path_mtu > IB_MTU_4096) {
1966 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1967 rc = -EINVAL;
1968 goto err;
1969 }
1970 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1971 ib_mtu_enum_to_int(iboe_get_mtu
1972 (dev->ndev->mtu)));
1973 }
1974
1975 if (!qp->mtu) {
1976 qp->mtu =
1977 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1978 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1979 }
1980
1981 SET_FIELD(qp_params.modify_flags,
1982 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1983
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001984 qp_params.traffic_class_tos = grh->traffic_class;
1985 qp_params.flow_label = grh->flow_label;
1986 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001987
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001988 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001989
1990 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1991 if (rc) {
1992 DP_ERR(dev,
1993 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001994 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001995 return rc;
1996 }
1997
1998 rc = qedr_get_dmac(dev, &attr->ah_attr,
1999 qp_params.remote_mac_addr);
2000 if (rc)
2001 return rc;
2002
2003 qp_params.use_local_mac = true;
2004 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2005
2006 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2007 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2008 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2009 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2010 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2011 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2012 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2013 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002014
2015 qp_params.mtu = qp->mtu;
2016 qp_params.lb_indication = false;
2017 }
2018
2019 if (!qp_params.mtu) {
2020 /* Stay with current MTU */
2021 if (qp->mtu)
2022 qp_params.mtu = qp->mtu;
2023 else
2024 qp_params.mtu =
2025 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2026 }
2027
2028 if (attr_mask & IB_QP_TIMEOUT) {
2029 SET_FIELD(qp_params.modify_flags,
2030 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2031
2032 qp_params.ack_timeout = attr->timeout;
2033 if (attr->timeout) {
2034 u32 temp;
2035
2036 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2037		/* IB timeout is 4.096 usec * 2^timeout; FW requires [msec] (e.g. timeout=14 -> ~67 msec) */
2038 qp_params.ack_timeout = temp;
2039 } else {
2040 /* Infinite */
2041 qp_params.ack_timeout = 0;
2042 }
2043 }
2044 if (attr_mask & IB_QP_RETRY_CNT) {
2045 SET_FIELD(qp_params.modify_flags,
2046 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2047 qp_params.retry_cnt = attr->retry_cnt;
2048 }
2049
2050 if (attr_mask & IB_QP_RNR_RETRY) {
2051 SET_FIELD(qp_params.modify_flags,
2052 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2053 qp_params.rnr_retry_cnt = attr->rnr_retry;
2054 }
2055
2056 if (attr_mask & IB_QP_RQ_PSN) {
2057 SET_FIELD(qp_params.modify_flags,
2058 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2059 qp_params.rq_psn = attr->rq_psn;
2060 qp->rq_psn = attr->rq_psn;
2061 }
2062
2063 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2064 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2065 rc = -EINVAL;
2066 DP_ERR(dev,
2067 "unsupported max_rd_atomic=%d, supported=%d\n",
2068 attr->max_rd_atomic,
2069 dev->attr.max_qp_req_rd_atomic_resc);
2070 goto err;
2071 }
2072
2073 SET_FIELD(qp_params.modify_flags,
2074 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2075 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2076 }
2077
2078 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2079 SET_FIELD(qp_params.modify_flags,
2080 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2081 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2082 }
2083
2084 if (attr_mask & IB_QP_SQ_PSN) {
2085 SET_FIELD(qp_params.modify_flags,
2086 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2087 qp_params.sq_psn = attr->sq_psn;
2088 qp->sq_psn = attr->sq_psn;
2089 }
2090
2091 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2092 if (attr->max_dest_rd_atomic >
2093 dev->attr.max_qp_resp_rd_atomic_resc) {
2094 DP_ERR(dev,
2095 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2096 attr->max_dest_rd_atomic,
2097 dev->attr.max_qp_resp_rd_atomic_resc);
2098
2099 rc = -EINVAL;
2100 goto err;
2101 }
2102
2103 SET_FIELD(qp_params.modify_flags,
2104 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2105 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2106 }
2107
2108 if (attr_mask & IB_QP_DEST_QPN) {
2109 SET_FIELD(qp_params.modify_flags,
2110 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2111
2112 qp_params.dest_qp = attr->dest_qp_num;
2113 qp->dest_qp_num = attr->dest_qp_num;
2114 }
2115
2116 if (qp->qp_type != IB_QPT_GSI)
2117 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2118 qp->qed_qp, &qp_params);
2119
2120 if (attr_mask & IB_QP_STATE) {
2121 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002122 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002123 qp->state = qp_params.new_state;
2124 }
2125
2126err:
2127 return rc;
2128}
2129
2130static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2131{
2132 int ib_qp_acc_flags = 0;
2133
2134 if (params->incoming_rdma_write_en)
2135 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2136 if (params->incoming_rdma_read_en)
2137 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2138 if (params->incoming_atomic_en)
2139 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2140 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2141 return ib_qp_acc_flags;
2142}
2143
2144int qedr_query_qp(struct ib_qp *ibqp,
2145 struct ib_qp_attr *qp_attr,
2146 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2147{
2148 struct qed_rdma_query_qp_out_params params;
2149 struct qedr_qp *qp = get_qedr_qp(ibqp);
2150 struct qedr_dev *dev = qp->dev;
2151 int rc = 0;
2152
2153 memset(&params, 0, sizeof(params));
2154
2155 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2156 if (rc)
2157 goto err;
2158
2159 memset(qp_attr, 0, sizeof(*qp_attr));
2160 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2161
2162 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2163 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002164 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002165 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2166 qp_attr->rq_psn = params.rq_psn;
2167 qp_attr->sq_psn = params.sq_psn;
2168 qp_attr->dest_qp_num = params.dest_qp;
2169
2170 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2171
2172 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2173 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2174 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2175 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002176 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002177 qp_init_attr->cap = qp_attr->cap;
2178
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002179 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002180 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2181 params.flow_label, qp->sgid_idx,
2182 params.hop_limit_ttl, params.traffic_class_tos);
2183 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2184 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2185 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002186 qp_attr->timeout = params.timeout;
2187 qp_attr->rnr_retry = params.rnr_retry;
2188 qp_attr->retry_cnt = params.retry_cnt;
2189 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2190 qp_attr->pkey_index = params.pkey_index;
2191 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002192 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2193 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002194 qp_attr->alt_pkey_index = 0;
2195 qp_attr->alt_port_num = 0;
2196 qp_attr->alt_timeout = 0;
2197 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2198
2199 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2200 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2201 qp_attr->max_rd_atomic = params.max_rd_atomic;
2202 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2203
2204 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2205 qp_attr->cap.max_inline_data);
2206
2207err:
2208 return rc;
2209}
2210
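/* Destroy the qed QP object (GSI QPs are torn down separately) and release
 * the user-mapped or kernel queue resources that back the SQ and RQ.
 */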
Bart Van Assche00899852017-10-11 10:49:17 -07002211static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
Amrani, Ramdf158562016-12-22 14:52:24 +02002212{
2213 int rc = 0;
2214
2215 if (qp->qp_type != IB_QPT_GSI) {
2216 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2217 if (rc)
2218 return rc;
2219 }
2220
2221 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2222 qedr_cleanup_user(dev, qp);
2223 else
2224 qedr_cleanup_kernel(dev, qp);
2225
2226 return 0;
2227}
2228
Ram Amranicecbcdd2016-10-10 13:15:34 +03002229int qedr_destroy_qp(struct ib_qp *ibqp)
2230{
2231 struct qedr_qp *qp = get_qedr_qp(ibqp);
2232 struct qedr_dev *dev = qp->dev;
2233 struct ib_qp_attr attr;
2234 int attr_mask = 0;
2235 int rc = 0;
2236
2237 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2238 qp, qp->qp_type);
2239
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002240 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2241 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2242 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2243 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002244
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002245 attr.qp_state = IB_QPS_ERR;
2246 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002247
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002248 /* Change the QP state to ERROR */
2249 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2250 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002251 } else {
2252 /* Wait for the connect/accept to complete */
2253 if (qp->ep) {
2254 int wait_count = 1;
2255
2256 while (qp->ep->during_connect) {
2257 DP_DEBUG(dev, QEDR_MSG_QP,
2258					 "Still during connect/accept\n");
2259
2260 msleep(100);
2261 if (wait_count++ > 200) {
2262 DP_NOTICE(dev,
2263 "during connect timeout\n");
2264 break;
2265 }
2266 }
2267 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002268 }
2269
Amrani, Ramdf158562016-12-22 14:52:24 +02002270 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002271 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002272
Amrani, Ramdf158562016-12-22 14:52:24 +02002273 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002274
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002275 if (atomic_dec_and_test(&qp->refcnt)) {
2276 qedr_idr_remove(dev, qp->qp_id);
2277 kfree(qp);
2278 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002279 return rc;
2280}
Ram Amranie0290cc2016-10-10 13:15:35 +03002281
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002282struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002283 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002284{
2285 struct qedr_ah *ah;
2286
2287 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2288 if (!ah)
2289 return ERR_PTR(-ENOMEM);
2290
2291 ah->attr = *attr;
2292
2293 return &ah->ibah;
2294}
2295
2296int qedr_destroy_ah(struct ib_ah *ibah)
2297{
2298 struct qedr_ah *ah = get_qedr_ah(ibah);
2299
2300 kfree(ah);
2301 return 0;
2302}
2303
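/* Move the MR's current PBL table and any tables left on the in-use list
 * onto the free list, then free every PBL on that list.
 */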
Ram Amranie0290cc2016-10-10 13:15:35 +03002304static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2305{
2306 struct qedr_pbl *pbl, *tmp;
2307
2308 if (info->pbl_table)
2309 list_add_tail(&info->pbl_table->list_entry,
2310 &info->free_pbl_list);
2311
2312 if (!list_empty(&info->inuse_pbl_list))
2313 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2314
2315 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2316 list_del(&pbl->list_entry);
2317 qedr_free_pbl(dev, &info->pbl_info, pbl);
2318 }
2319}
2320
2321static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2322 size_t page_list_len, bool two_layered)
2323{
2324 struct qedr_pbl *tmp;
2325 int rc;
2326
2327 INIT_LIST_HEAD(&info->free_pbl_list);
2328 INIT_LIST_HEAD(&info->inuse_pbl_list);
2329
2330 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2331 page_list_len, two_layered);
2332 if (rc)
2333 goto done;
2334
2335 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002336 if (IS_ERR(info->pbl_table)) {
2337 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002338 goto done;
2339 }
2340
2341 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2342 &info->pbl_table->pa);
2343
2344	/* in the usual case we use 2 PBLs, so we add one to the free
2345	 * list and allocate another one
2346 */
2347 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002348 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002349 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2350 goto done;
2351 }
2352
2353 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2354
2355 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2356
2357done:
2358 if (rc)
2359 free_mr_info(dev, info);
2360
2361 return rc;
2362}
2363
2364struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2365 u64 usr_addr, int acc, struct ib_udata *udata)
2366{
2367 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2368 struct qedr_mr *mr;
2369 struct qedr_pd *pd;
2370 int rc = -ENOMEM;
2371
2372 pd = get_qedr_pd(ibpd);
2373 DP_DEBUG(dev, QEDR_MSG_MR,
2374 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2375 pd->pd_id, start, len, usr_addr, acc);
2376
2377 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2378 return ERR_PTR(-EINVAL);
2379
2380 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2381 if (!mr)
2382 return ERR_PTR(rc);
2383
2384 mr->type = QEDR_MR_USER;
2385
2386 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2387 if (IS_ERR(mr->umem)) {
2388 rc = -EFAULT;
2389 goto err0;
2390 }
2391
2392 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2393 if (rc)
2394 goto err1;
2395
2396 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002397 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002398
2399 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2400 if (rc) {
2401 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2402 goto err1;
2403 }
2404
2405 /* Index only, 18 bit long, lkey = itid << 8 | key */
2406 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2407 mr->hw_mr.key = 0;
2408 mr->hw_mr.pd = pd->pd_id;
2409 mr->hw_mr.local_read = 1;
2410 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2411 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2412 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2413 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2414 mr->hw_mr.mw_bind = false;
2415 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2416 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2417 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002418 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002419 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2420 mr->hw_mr.length = len;
2421 mr->hw_mr.vaddr = usr_addr;
2422 mr->hw_mr.zbva = false;
2423 mr->hw_mr.phy_mr = false;
2424 mr->hw_mr.dma_mr = false;
2425
2426 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2427 if (rc) {
2428 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2429 goto err2;
2430 }
2431
2432 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2433 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2434 mr->hw_mr.remote_atomic)
2435 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2436
2437 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2438 mr->ibmr.lkey);
2439 return &mr->ibmr;
2440
2441err2:
2442 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2443err1:
2444 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2445err0:
2446 kfree(mr);
2447 return ERR_PTR(rc);
2448}
2449
2450int qedr_dereg_mr(struct ib_mr *ib_mr)
2451{
2452 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2453 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2454 int rc = 0;
2455
2456 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2457 if (rc)
2458 return rc;
2459
2460 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2461
2462 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2463 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2464
2465 /* it could be user registered memory. */
2466 if (mr->umem)
2467 ib_umem_release(mr->umem);
2468
2469 kfree(mr);
2470
2471 return rc;
2472}
2473
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002474static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2475 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002476{
2477 struct qedr_pd *pd = get_qedr_pd(ibpd);
2478 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2479 struct qedr_mr *mr;
2480 int rc = -ENOMEM;
2481
2482 DP_DEBUG(dev, QEDR_MSG_MR,
2483 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2484 max_page_list_len);
2485
2486 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2487 if (!mr)
2488 return ERR_PTR(rc);
2489
2490 mr->dev = dev;
2491 mr->type = QEDR_MR_FRMR;
2492
2493 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2494 if (rc)
2495 goto err0;
2496
2497 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2498 if (rc) {
2499 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2500 goto err0;
2501 }
2502
2503 /* Index only, 18 bit long, lkey = itid << 8 | key */
2504 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2505 mr->hw_mr.key = 0;
2506 mr->hw_mr.pd = pd->pd_id;
2507 mr->hw_mr.local_read = 1;
2508 mr->hw_mr.local_write = 0;
2509 mr->hw_mr.remote_read = 0;
2510 mr->hw_mr.remote_write = 0;
2511 mr->hw_mr.remote_atomic = 0;
2512 mr->hw_mr.mw_bind = false;
2513 mr->hw_mr.pbl_ptr = 0;
2514 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2515 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2516 mr->hw_mr.fbo = 0;
2517 mr->hw_mr.length = 0;
2518 mr->hw_mr.vaddr = 0;
2519 mr->hw_mr.zbva = false;
2520 mr->hw_mr.phy_mr = true;
2521 mr->hw_mr.dma_mr = false;
2522
2523 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2524 if (rc) {
2525 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2526 goto err1;
2527 }
2528
2529 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2530 mr->ibmr.rkey = mr->ibmr.lkey;
2531
2532 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2533 return mr;
2534
2535err1:
2536 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2537err0:
2538 kfree(mr);
2539 return ERR_PTR(rc);
2540}
2541
2542struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2543 enum ib_mr_type mr_type, u32 max_num_sg)
2544{
Ram Amranie0290cc2016-10-10 13:15:35 +03002545 struct qedr_mr *mr;
2546
2547 if (mr_type != IB_MR_TYPE_MEM_REG)
2548 return ERR_PTR(-EINVAL);
2549
2550 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2551
2552 if (IS_ERR(mr))
2553 return ERR_PTR(-EINVAL);
2554
Ram Amranie0290cc2016-10-10 13:15:35 +03002555 return &mr->ibmr;
2556}
2557
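/* ib_sg_to_pages() callback: store one page address in the next free
 * page-buffer entry (PBE), advancing to the next PBL table once the current
 * one holds pbes_in_page entries.
 */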
2558static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2559{
2560 struct qedr_mr *mr = get_qedr_mr(ibmr);
2561 struct qedr_pbl *pbl_table;
2562 struct regpair *pbe;
2563 u32 pbes_in_page;
2564
2565 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2566		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2567 return -ENOMEM;
2568 }
2569
2570 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2571 mr->npages, addr);
2572
2573 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2574 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2575 pbe = (struct regpair *)pbl_table->va;
2576 pbe += mr->npages % pbes_in_page;
2577 pbe->lo = cpu_to_le32((u32)addr);
2578 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2579
2580 mr->npages++;
2581
2582 return 0;
2583}
2584
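/* Reclaim page lists whose fast-reg work has completed. One completion is
 * held back (the "- 1" below), so only PBLs of older completed operations
 * are moved from the in-use list back to the free list.
 */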
2585static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2586{
2587 int work = info->completed - info->completed_handled - 1;
2588
2589 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2590 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2591 struct qedr_pbl *pbl;
2592
2593		/* Free all the page lists that can be freed
2594		 * (all the ones that were invalidated), under the assumption
2595		 * that if an FMR completed successfully then any invalidate
2596		 * operation posted before it has also completed
2597 */
2598 pbl = list_first_entry(&info->inuse_pbl_list,
2599 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002600 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002601 info->completed_handled++;
2602 }
2603}
2604
2605int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2606 int sg_nents, unsigned int *sg_offset)
2607{
2608 struct qedr_mr *mr = get_qedr_mr(ibmr);
2609
2610 mr->npages = 0;
2611
2612 handle_completed_mrs(mr->dev, &mr->info);
2613 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2614}
2615
2616struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2617{
2618 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2619 struct qedr_pd *pd = get_qedr_pd(ibpd);
2620 struct qedr_mr *mr;
2621 int rc;
2622
2623 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2624 if (!mr)
2625 return ERR_PTR(-ENOMEM);
2626
2627 mr->type = QEDR_MR_DMA;
2628
2629 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2630 if (rc) {
2631 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2632 goto err1;
2633 }
2634
2635 /* index only, 18 bit long, lkey = itid << 8 | key */
2636 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2637 mr->hw_mr.pd = pd->pd_id;
2638 mr->hw_mr.local_read = 1;
2639 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2640 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2641 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2642 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2643 mr->hw_mr.dma_mr = true;
2644
2645 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2646 if (rc) {
2647 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2648 goto err2;
2649 }
2650
2651 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2652 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2653 mr->hw_mr.remote_atomic)
2654 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2655
2656 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2657 return &mr->ibmr;
2658
2659err2:
2660 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2661err1:
2662 kfree(mr);
2663 return ERR_PTR(rc);
2664}
Ram Amraniafa0e132016-10-10 13:15:36 +03002665
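/* One slot is always left unused so that prod == cons can only mean "empty";
 * the queue is therefore full when (prod + 1) % max_wr == cons.
 */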
2666static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2667{
2668 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2669}
2670
2671static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2672{
2673 int i, len = 0;
2674
2675 for (i = 0; i < num_sge; i++)
2676 len += sg_list[i].length;
2677
2678 return len;
2679}
2680
2681static void swap_wqe_data64(u64 *p)
2682{
2683 int i;
2684
2685 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2686 *p = cpu_to_be64(cpu_to_le64(*p));
2687}
2688
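/* Copy the payload of an IB_SEND_INLINE work request directly into SQ chain
 * elements instead of referencing it via SGEs. Data is packed segment by
 * segment and the filled segments are byte-swapped with swap_wqe_data64().
 */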
2689static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2690 struct qedr_qp *qp, u8 *wqe_size,
2691 struct ib_send_wr *wr,
2692 struct ib_send_wr **bad_wr, u8 *bits,
2693 u8 bit)
2694{
2695 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2696 char *seg_prt, *wqe;
2697 int i, seg_siz;
2698
2699 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2700 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2701 *bad_wr = wr;
2702 return 0;
2703 }
2704
2705 if (!data_size)
2706 return data_size;
2707
2708 *bits |= bit;
2709
2710 seg_prt = NULL;
2711 wqe = NULL;
2712 seg_siz = 0;
2713
2714 /* Copy data inline */
2715 for (i = 0; i < wr->num_sge; i++) {
2716 u32 len = wr->sg_list[i].length;
2717 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2718
2719 while (len > 0) {
2720 u32 cur;
2721
2722 /* New segment required */
2723 if (!seg_siz) {
2724 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2725 seg_prt = wqe;
2726 seg_siz = sizeof(struct rdma_sq_common_wqe);
2727 (*wqe_size)++;
2728 }
2729
2730 /* Calculate currently allowed length */
2731 cur = min_t(u32, len, seg_siz);
2732 memcpy(seg_prt, src, cur);
2733
2734 /* Update segment variables */
2735 seg_prt += cur;
2736 seg_siz -= cur;
2737
2738 /* Update sge variables */
2739 src += cur;
2740 len -= cur;
2741
2742 /* Swap fully-completed segments */
2743 if (!seg_siz)
2744 swap_wqe_data64((u64 *)wqe);
2745 }
2746 }
2747
2748 /* swap last not completed segment */
2749 if (seg_siz)
2750 swap_wqe_data64((u64 *)wqe);
2751
2752 return data_size;
2753}
2754
2755#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2756 do { \
2757 DMA_REGPAIR_LE(sge->addr, vaddr); \
2758 (sge)->length = cpu_to_le32(vlength); \
2759 (sge)->flags = cpu_to_le32(vflags); \
2760 } while (0)
2761
2762#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2763 do { \
2764 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2765 (hdr)->num_sges = num_sge; \
2766 } while (0)
2767
2768#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2769 do { \
2770 DMA_REGPAIR_LE(sge->addr, vaddr); \
2771 (sge)->length = cpu_to_le32(vlength); \
2772 (sge)->l_key = cpu_to_le32(vlkey); \
2773 } while (0)
2774
2775static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2776 struct ib_send_wr *wr)
2777{
2778 u32 data_size = 0;
2779 int i;
2780
2781 for (i = 0; i < wr->num_sge; i++) {
2782 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2783
2784 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2785 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2786 sge->length = cpu_to_le32(wr->sg_list[i].length);
2787 data_size += wr->sg_list[i].length;
2788 }
2789
2790 if (wqe_size)
2791 *wqe_size += wr->num_sge;
2792
2793 return data_size;
2794}
2795
2796static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2797 struct qedr_qp *qp,
2798 struct rdma_sq_rdma_wqe_1st *rwqe,
2799 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2800 struct ib_send_wr *wr,
2801 struct ib_send_wr **bad_wr)
2802{
2803 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2804 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2805
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002806 if (wr->send_flags & IB_SEND_INLINE &&
2807 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2808 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002809 u8 flags = 0;
2810
2811 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2812 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2813 bad_wr, &rwqe->flags, flags);
2814 }
2815
2816 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2817}
2818
2819static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2820 struct qedr_qp *qp,
2821 struct rdma_sq_send_wqe_1st *swqe,
2822 struct rdma_sq_send_wqe_2st *swqe2,
2823 struct ib_send_wr *wr,
2824 struct ib_send_wr **bad_wr)
2825{
2826 memset(swqe2, 0, sizeof(*swqe2));
2827 if (wr->send_flags & IB_SEND_INLINE) {
2828 u8 flags = 0;
2829
2830 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2831 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2832 bad_wr, &swqe->flags, flags);
2833 }
2834
2835 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2836}
2837
2838static int qedr_prepare_reg(struct qedr_qp *qp,
2839 struct rdma_sq_fmr_wqe_1st *fwqe1,
2840 struct ib_reg_wr *wr)
2841{
2842 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2843 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2844
2845 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2846 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2847 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2848 fwqe1->l_key = wr->key;
2849
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002850 fwqe2->access_ctrl = 0;
2851
Ram Amraniafa0e132016-10-10 13:15:36 +03002852 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2853 !!(wr->access & IB_ACCESS_REMOTE_READ));
2854 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2855 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2856 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2857 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2858 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2859 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2860 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2861 fwqe2->fmr_ctrl = 0;
2862
2863 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2864 ilog2(mr->ibmr.page_size) - 12);
2865
2866 fwqe2->length_hi = 0;
2867 fwqe2->length_lo = mr->ibmr.length;
2868 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2869 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2870
2871 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2872
2873 return 0;
2874}
2875
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002876static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002877{
2878 switch (opcode) {
2879 case IB_WR_RDMA_WRITE:
2880 case IB_WR_RDMA_WRITE_WITH_IMM:
2881 return IB_WC_RDMA_WRITE;
2882 case IB_WR_SEND_WITH_IMM:
2883 case IB_WR_SEND:
2884 case IB_WR_SEND_WITH_INV:
2885 return IB_WC_SEND;
2886 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002887 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002888 return IB_WC_RDMA_READ;
2889 case IB_WR_ATOMIC_CMP_AND_SWP:
2890 return IB_WC_COMP_SWAP;
2891 case IB_WR_ATOMIC_FETCH_AND_ADD:
2892 return IB_WC_FETCH_ADD;
2893 case IB_WR_REG_MR:
2894 return IB_WC_REG_MR;
2895 case IB_WR_LOCAL_INV:
2896 return IB_WC_LOCAL_INV;
2897 default:
2898 return IB_WC_SEND;
2899 }
2900}
2901
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002902static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002903{
2904 int wq_is_full, err_wr, pbl_is_full;
2905 struct qedr_dev *dev = qp->dev;
2906
2907 /* prevent SQ overflow and/or processing of a bad WR */
2908 err_wr = wr->num_sge > qp->sq.max_sges;
2909 wq_is_full = qedr_wq_is_full(&qp->sq);
2910 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2911 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2912 if (wq_is_full || err_wr || pbl_is_full) {
2913 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2914 DP_ERR(dev,
2915 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2916 qp);
2917 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2918 }
2919
2920 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2921 DP_ERR(dev,
2922 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2923 qp);
2924 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2925 }
2926
2927 if (pbl_is_full &&
2928 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2929 DP_ERR(dev,
2930 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2931 qp);
2932 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2933 }
2934 return false;
2935 }
2936 return true;
2937}
2938
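/* Build and enqueue a single send WQE: the common header is produced first,
 * then the opcode-specific fields (send, RDMA, atomic, local invalidate or
 * fast-reg) are filled in. On failure the producer index and prev_wqe_size
 * are rolled back so the SQ chain stays consistent.
 */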
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002939static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002940 struct ib_send_wr **bad_wr)
2941{
2942 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2943 struct qedr_qp *qp = get_qedr_qp(ibqp);
2944 struct rdma_sq_atomic_wqe_1st *awqe1;
2945 struct rdma_sq_atomic_wqe_2nd *awqe2;
2946 struct rdma_sq_atomic_wqe_3rd *awqe3;
2947 struct rdma_sq_send_wqe_2st *swqe2;
2948 struct rdma_sq_local_inv_wqe *iwqe;
2949 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2950 struct rdma_sq_send_wqe_1st *swqe;
2951 struct rdma_sq_rdma_wqe_1st *rwqe;
2952 struct rdma_sq_fmr_wqe_1st *fwqe1;
2953 struct rdma_sq_common_wqe *wqe;
2954 u32 length;
2955 int rc = 0;
2956 bool comp;
2957
2958 if (!qedr_can_post_send(qp, wr)) {
2959 *bad_wr = wr;
2960 return -ENOMEM;
2961 }
2962
2963 wqe = qed_chain_produce(&qp->sq.pbl);
2964 qp->wqe_wr_id[qp->sq.prod].signaled =
2965 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2966
2967 wqe->flags = 0;
2968 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2969 !!(wr->send_flags & IB_SEND_SOLICITED));
2970 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2971 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2972 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2973 !!(wr->send_flags & IB_SEND_FENCE));
2974 wqe->prev_wqe_size = qp->prev_wqe_size;
2975
2976 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2977
2978 switch (wr->opcode) {
2979 case IB_WR_SEND_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02002980 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2981 rc = -EINVAL;
2982 *bad_wr = wr;
2983 break;
2984 }
Ram Amraniafa0e132016-10-10 13:15:36 +03002985 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2986 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2987 swqe->wqe_size = 2;
2988 swqe2 = qed_chain_produce(&qp->sq.pbl);
2989
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07002990 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
Ram Amraniafa0e132016-10-10 13:15:36 +03002991 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2992 wr, bad_wr);
2993 swqe->length = cpu_to_le32(length);
2994 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2995 qp->prev_wqe_size = swqe->wqe_size;
2996 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2997 break;
2998 case IB_WR_SEND:
2999 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3000 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3001
3002 swqe->wqe_size = 2;
3003 swqe2 = qed_chain_produce(&qp->sq.pbl);
3004 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3005 wr, bad_wr);
3006 swqe->length = cpu_to_le32(length);
3007 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3008 qp->prev_wqe_size = swqe->wqe_size;
3009 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3010 break;
3011 case IB_WR_SEND_WITH_INV:
3012 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3013 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3014 swqe2 = qed_chain_produce(&qp->sq.pbl);
3015 swqe->wqe_size = 2;
3016 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3017 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3018 wr, bad_wr);
3019 swqe->length = cpu_to_le32(length);
3020 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3021 qp->prev_wqe_size = swqe->wqe_size;
3022 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3023 break;
3024
3025 case IB_WR_RDMA_WRITE_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02003026 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3027 rc = -EINVAL;
3028 *bad_wr = wr;
3029 break;
3030 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003031 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3032 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3033
3034 rwqe->wqe_size = 2;
3035 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3036 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3037 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3038 wr, bad_wr);
3039 rwqe->length = cpu_to_le32(length);
3040 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3041 qp->prev_wqe_size = rwqe->wqe_size;
3042 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3043 break;
3044 case IB_WR_RDMA_WRITE:
3045 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3046 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3047
3048 rwqe->wqe_size = 2;
3049 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3050 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3051 wr, bad_wr);
3052 rwqe->length = cpu_to_le32(length);
3053 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3054 qp->prev_wqe_size = rwqe->wqe_size;
3055 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3056 break;
3057 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003058 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
Bart Van Assche1b8a708b2017-10-11 10:49:19 -07003059		/* fallthrough -- handling is identical to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003060
3061 case IB_WR_RDMA_READ:
3062 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3063 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3064
3065 rwqe->wqe_size = 2;
3066 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3067 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3068 wr, bad_wr);
3069 rwqe->length = cpu_to_le32(length);
3070 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3071 qp->prev_wqe_size = rwqe->wqe_size;
3072 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3073 break;
3074
3075 case IB_WR_ATOMIC_CMP_AND_SWP:
3076 case IB_WR_ATOMIC_FETCH_AND_ADD:
3077 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3078 awqe1->wqe_size = 4;
3079
3080 awqe2 = qed_chain_produce(&qp->sq.pbl);
3081 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3082 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3083
3084 awqe3 = qed_chain_produce(&qp->sq.pbl);
3085
3086 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3087 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3088 DMA_REGPAIR_LE(awqe3->swap_data,
3089 atomic_wr(wr)->compare_add);
3090 } else {
3091 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3092 DMA_REGPAIR_LE(awqe3->swap_data,
3093 atomic_wr(wr)->swap);
3094 DMA_REGPAIR_LE(awqe3->cmp_data,
3095 atomic_wr(wr)->compare_add);
3096 }
3097
3098 qedr_prepare_sq_sges(qp, NULL, wr);
3099
3100 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3101 qp->prev_wqe_size = awqe1->wqe_size;
3102 break;
3103
3104 case IB_WR_LOCAL_INV:
3105 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3106 iwqe->wqe_size = 1;
3107
3108 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3109 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3110 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3111 qp->prev_wqe_size = iwqe->wqe_size;
3112 break;
3113 case IB_WR_REG_MR:
3114 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3115 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3116 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3117 fwqe1->wqe_size = 2;
3118
3119 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3120 if (rc) {
3121 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3122 *bad_wr = wr;
3123 break;
3124 }
3125
3126 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3127 qp->prev_wqe_size = fwqe1->wqe_size;
3128 break;
3129 default:
3130 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3131 rc = -EINVAL;
3132 *bad_wr = wr;
3133 break;
3134 }
3135
3136 if (*bad_wr) {
3137 u16 value;
3138
3139 /* Restore prod to its position before
3140 * this WR was processed
3141 */
3142 value = le16_to_cpu(qp->sq.db_data.data.value);
3143 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3144
3145 /* Restore prev_wqe_size */
3146 qp->prev_wqe_size = wqe->prev_wqe_size;
3147 rc = -EINVAL;
3148 DP_ERR(dev, "POST SEND FAILED\n");
3149 }
3150
3151 return rc;
3152}
3153
3154int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3155 struct ib_send_wr **bad_wr)
3156{
3157 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3158 struct qedr_qp *qp = get_qedr_qp(ibqp);
3159 unsigned long flags;
3160 int rc = 0;
3161
3162 *bad_wr = NULL;
3163
Ram Amrani04886772016-10-10 13:15:38 +03003164 if (qp->qp_type == IB_QPT_GSI)
3165 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3166
Ram Amraniafa0e132016-10-10 13:15:36 +03003167 spin_lock_irqsave(&qp->q_lock, flags);
3168
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003169 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3170 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3171 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3172 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3173 spin_unlock_irqrestore(&qp->q_lock, flags);
3174 *bad_wr = wr;
3175 DP_DEBUG(dev, QEDR_MSG_CQ,
3176 "QP in wrong state! QP icid=0x%x state %d\n",
3177 qp->icid, qp->state);
3178 return -EINVAL;
3179 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003180 }
3181
Ram Amraniafa0e132016-10-10 13:15:36 +03003182 while (wr) {
3183 rc = __qedr_post_send(ibqp, wr, bad_wr);
3184 if (rc)
3185 break;
3186
3187 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3188
3189 qedr_inc_sw_prod(&qp->sq);
3190
3191 qp->sq.db_data.data.value++;
3192
3193 wr = wr->next;
3194 }
3195
3196 /* Trigger doorbell
3197 * If there was a failure in the first WR then it will be triggered in
3198	 * vain. However this is not harmful (as long as the producer value is
3199 * unchanged). For performance reasons we avoid checking for this
3200 * redundant doorbell.
3201 */
3202 wmb();
Sinan Kaya561e5d482018-03-13 23:20:24 -04003203 writel_relaxed(qp->sq.db_data.raw, qp->sq.db);
Ram Amraniafa0e132016-10-10 13:15:36 +03003204
3205 /* Make sure write sticks */
3206 mmiowb();
3207
3208 spin_unlock_irqrestore(&qp->q_lock, flags);
3209
3210 return rc;
3211}
3212
3213int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3214 struct ib_recv_wr **bad_wr)
3215{
3216 struct qedr_qp *qp = get_qedr_qp(ibqp);
3217 struct qedr_dev *dev = qp->dev;
3218 unsigned long flags;
3219 int status = 0;
3220
Ram Amrani04886772016-10-10 13:15:38 +03003221 if (qp->qp_type == IB_QPT_GSI)
3222 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3223
Ram Amraniafa0e132016-10-10 13:15:36 +03003224 spin_lock_irqsave(&qp->q_lock, flags);
3225
Amrani, Ram922d9a42016-12-22 14:40:38 +02003226 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003227 spin_unlock_irqrestore(&qp->q_lock, flags);
3228 *bad_wr = wr;
3229 return -EINVAL;
3230 }
3231
3232 while (wr) {
3233 int i;
3234
3235 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3236 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3237 wr->num_sge > qp->rq.max_sges) {
3238 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3239 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3240 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3241 qp->rq.max_sges);
3242 status = -ENOMEM;
3243 *bad_wr = wr;
3244 break;
3245 }
3246 for (i = 0; i < wr->num_sge; i++) {
3247 u32 flags = 0;
3248 struct rdma_rq_sge *rqe =
3249 qed_chain_produce(&qp->rq.pbl);
3250
3251 /* First one must include the number
3252 * of SGE in the list
3253 */
3254 if (!i)
3255 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3256 wr->num_sge);
3257
3258 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3259 wr->sg_list[i].lkey);
3260
3261 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3262 wr->sg_list[i].length, flags);
3263 }
3264
3265		/* Special case of no SGEs. FW requires between 1 and 4 SGEs,
3266		 * so in this case we need to post one SGE with length zero. This is
3267		 * because an RDMA write with immediate consumes an RQ entry.
3268 */
3269 if (!wr->num_sge) {
3270 u32 flags = 0;
3271 struct rdma_rq_sge *rqe =
3272 qed_chain_produce(&qp->rq.pbl);
3273
3274 /* First one must include the number
3275 * of SGE in the list
3276 */
3277 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3278 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3279
3280 RQ_SGE_SET(rqe, 0, 0, flags);
3281 i = 1;
3282 }
3283
3284 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3285 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3286
3287 qedr_inc_sw_prod(&qp->rq);
3288
3289 /* Flush all the writes before signalling doorbell */
3290 wmb();
3291
3292 qp->rq.db_data.data.value++;
3293
3294 writel(qp->rq.db_data.raw, qp->rq.db);
3295
3296 /* Make sure write sticks */
3297 mmiowb();
3298
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003299 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3300 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3301 mmiowb(); /* for second doorbell */
3302 }
3303
Ram Amraniafa0e132016-10-10 13:15:36 +03003304 wr = wr->next;
3305 }
3306
3307 spin_unlock_irqrestore(&qp->q_lock, flags);
3308
3309 return status;
3310}
3311
3312static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3313{
3314 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3315
3316 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3317 cq->pbl_toggle;
3318}
3319
3320static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3321{
3322 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3323 struct qedr_qp *qp;
3324
3325 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3326 resp_cqe->qp_handle.lo,
3327 u64);
3328 return qp;
3329}
3330
3331static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3332{
3333 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3334
3335 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3336}
3337
3338/* Return latest CQE (needs processing) */
3339static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3340{
3341 return cq->latest_cqe;
3342}
3343
3344/* In FMR we need to increase the FMR-completed counter for the FMR
3345 * algorithm that determines whether we can free a PBL or not.
3346 * We need to perform this whether the work request was signaled or not. For
3347 * this purpose we call this function from the condition that checks if a WR
3348 * should be skipped, to make sure we don't miss it (possibly this FMR
3349 * operation was not signaled)
3350 */
3351static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3352{
3353 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3354 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3355}
3356
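/* Consume SQ work requests up to hw_cons. A WC is generated for every
 * signaled WR (or for every WR when 'force' is set, e.g. on flush); skipped
 * WRs only advance the shadow ring and the PBL chain. Returns the number of
 * WCs written.
 */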
3357static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3358 struct qedr_cq *cq, int num_entries,
3359 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3360 int force)
3361{
3362 u16 cnt = 0;
3363
3364 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3365 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3366 qedr_chk_if_fmr(qp);
3367 /* skip WC */
3368 goto next_cqe;
3369 }
3370
3371 /* fill WC */
3372 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003373 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003374 wc->wc_flags = 0;
3375 wc->src_qp = qp->id;
3376 wc->qp = &qp->ibqp;
3377
3378 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3379 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3380
3381 switch (wc->opcode) {
3382 case IB_WC_RDMA_WRITE:
3383 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3384 break;
3385 case IB_WC_COMP_SWAP:
3386 case IB_WC_FETCH_ADD:
3387 wc->byte_len = 8;
3388 break;
3389 case IB_WC_REG_MR:
3390 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3391 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003392 case IB_WC_RDMA_READ:
3393 case IB_WC_SEND:
3394 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3395 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003396 default:
3397 break;
3398 }
3399
3400 num_entries--;
3401 wc++;
3402 cnt++;
3403next_cqe:
3404 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3405 qed_chain_consume(&qp->sq.pbl);
3406 qedr_inc_sw_cons(&qp->sq);
3407 }
3408
3409 return cnt;
3410}
3411
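/* Handle a requester (SQ) CQE. Successful and flushed CQEs complete WQEs up
 * to sq_cons; other error statuses complete the preceding WQEs as successful
 * and report the failing WQE with a WC status derived from the CQE status.
 */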
3412static int qedr_poll_cq_req(struct qedr_dev *dev,
3413 struct qedr_qp *qp, struct qedr_cq *cq,
3414 int num_entries, struct ib_wc *wc,
3415 struct rdma_cqe_requester *req)
3416{
3417 int cnt = 0;
3418
3419 switch (req->status) {
3420 case RDMA_CQE_REQ_STS_OK:
3421 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3422 IB_WC_SUCCESS, 0);
3423 break;
3424 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003425 if (qp->state != QED_ROCE_QP_STATE_ERR)
Kalderon, Michaldc728f72018-01-25 13:23:20 +02003426 DP_DEBUG(dev, QEDR_MSG_CQ,
3427 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3428 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003429 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003430 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003431 break;
3432 default:
3433 		/* process all WQEs before the consumer */
3434 qp->state = QED_ROCE_QP_STATE_ERR;
3435 cnt = process_req(dev, qp, cq, num_entries, wc,
3436 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3437 wc += cnt;
3438 		/* If we have an extra WC, fill it with the actual error info */
3439 if (cnt < num_entries) {
3440 enum ib_wc_status wc_status;
3441
3442 switch (req->status) {
3443 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3444 DP_ERR(dev,
3445 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3446 cq->icid, qp->icid);
3447 wc_status = IB_WC_BAD_RESP_ERR;
3448 break;
3449 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3450 DP_ERR(dev,
3451 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3452 cq->icid, qp->icid);
3453 wc_status = IB_WC_LOC_LEN_ERR;
3454 break;
3455 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3456 DP_ERR(dev,
3457 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3458 cq->icid, qp->icid);
3459 wc_status = IB_WC_LOC_QP_OP_ERR;
3460 break;
3461 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3462 DP_ERR(dev,
3463 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3464 cq->icid, qp->icid);
3465 wc_status = IB_WC_LOC_PROT_ERR;
3466 break;
3467 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3468 DP_ERR(dev,
3469 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3470 cq->icid, qp->icid);
3471 wc_status = IB_WC_MW_BIND_ERR;
3472 break;
3473 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3474 DP_ERR(dev,
3475 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3476 cq->icid, qp->icid);
3477 wc_status = IB_WC_REM_INV_REQ_ERR;
3478 break;
3479 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3480 DP_ERR(dev,
3481 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3482 cq->icid, qp->icid);
3483 wc_status = IB_WC_REM_ACCESS_ERR;
3484 break;
3485 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3486 DP_ERR(dev,
3487 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3488 cq->icid, qp->icid);
3489 wc_status = IB_WC_REM_OP_ERR;
3490 break;
3491 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3492 DP_ERR(dev,
3493 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3494 cq->icid, qp->icid);
3495 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3496 break;
3497 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3498 DP_ERR(dev,
3499 				    "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3500 cq->icid, qp->icid);
3501 wc_status = IB_WC_RETRY_EXC_ERR;
3502 break;
3503 default:
3504 DP_ERR(dev,
3505 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3506 cq->icid, qp->icid);
3507 wc_status = IB_WC_GENERAL_ERR;
3508 }
3509 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3510 wc_status, 1);
3511 }
3512 }
3513
3514 return cnt;
3515}
3516
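/* Map a FW responder CQE status to the corresponding IB work completion
 * status.
 */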
Amrani, Ramb6acd712017-04-27 13:35:35 +03003517static inline int qedr_cqe_resp_status_to_ib(u8 status)
3518{
3519 switch (status) {
3520 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3521 return IB_WC_LOC_ACCESS_ERR;
3522 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3523 return IB_WC_LOC_LEN_ERR;
3524 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3525 return IB_WC_LOC_QP_OP_ERR;
3526 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3527 return IB_WC_LOC_PROT_ERR;
3528 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3529 return IB_WC_MW_BIND_ERR;
3530 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3531 return IB_WC_REM_INV_RD_REQ_ERR;
3532 case RDMA_CQE_RESP_STS_OK:
3533 return IB_WC_SUCCESS;
3534 default:
3535 return IB_WC_GENERAL_ERR;
3536 }
3537}
3538
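/* Fill the WC for a successful responder CQE, including immediate data or an
 * invalidated rkey when flagged. Returns -EINVAL for an inconsistent flag
 * combination.
 */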
3539static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3540 struct ib_wc *wc)
3541{
3542 wc->status = IB_WC_SUCCESS;
3543 wc->byte_len = le32_to_cpu(resp->length);
3544
3545 if (resp->flags & QEDR_RESP_IMM) {
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07003546 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
Amrani, Ramb6acd712017-04-27 13:35:35 +03003547 wc->wc_flags |= IB_WC_WITH_IMM;
3548
3549 if (resp->flags & QEDR_RESP_RDMA)
3550 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3551
3552 if (resp->flags & QEDR_RESP_INV)
3553 return -EINVAL;
3554
3555 } else if (resp->flags & QEDR_RESP_INV) {
3556 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3557 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3558
3559 if (resp->flags & QEDR_RESP_RDMA)
3560 return -EINVAL;
3561
3562 } else if (resp->flags & QEDR_RESP_RDMA) {
3563 return -EINVAL;
3564 }
3565
3566 return 0;
3567}
3568
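/* Translate a responder CQE into the caller-supplied ib_wc using the given
 * wr_id.
 */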
Ram Amraniafa0e132016-10-10 13:15:36 +03003569static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3570 struct qedr_cq *cq, struct ib_wc *wc,
3571 struct rdma_cqe_responder *resp, u64 wr_id)
3572{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003573 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003574 wc->opcode = IB_WC_RECV;
3575 wc->wc_flags = 0;
3576
Amrani, Ramb6acd712017-04-27 13:35:35 +03003577 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3578 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3579 DP_ERR(dev,
3580 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3581 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003582
Amrani, Ramb6acd712017-04-27 13:35:35 +03003583 } else {
3584 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3585 if (wc->status == IB_WC_GENERAL_ERR)
3586 DP_ERR(dev,
3587 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3588 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003589 }
3590
Amrani, Ramb6acd712017-04-27 13:35:35 +03003591 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003592 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003593 wc->src_qp = qp->id;
3594 wc->qp = &qp->ibqp;
3595 wc->wr_id = wr_id;
3596}
3597
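/* Complete a single RQ WQE for a responder CQE and advance the RQ consumer. */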
3598static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3599 struct qedr_cq *cq, struct ib_wc *wc,
3600 struct rdma_cqe_responder *resp)
3601{
3602 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3603
3604 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3605
3606 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3607 qed_chain_consume(&qp->rq.pbl);
3608 qedr_inc_sw_cons(&qp->rq);
3609
3610 return 1;
3611}
3612
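/* Flush RQ WQEs up to hw_cons, reporting each with IB_WC_WR_FLUSH_ERR.
 * Returns the number of WCs generated.
 */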
3613static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3614 int num_entries, struct ib_wc *wc, u16 hw_cons)
3615{
3616 u16 cnt = 0;
3617
3618 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3619 /* fill WC */
3620 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003621 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003622 wc->wc_flags = 0;
3623 wc->src_qp = qp->id;
3624 wc->byte_len = 0;
3625 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3626 wc->qp = &qp->ibqp;
3627 num_entries--;
3628 wc++;
3629 cnt++;
3630 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3631 qed_chain_consume(&qp->rq.pbl);
3632 qedr_inc_sw_cons(&qp->rq);
3633 }
3634
3635 return cnt;
3636}
3637
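/* Consume the responder CQE only once the RQ consumer has caught up with the
 * index reported by the CQE, and mark that the CQ doorbell needs updating.
 */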
3638static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3639 struct rdma_cqe_responder *resp, int *update)
3640{
3641 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3642 consume_cqe(cq);
3643 *update |= 1;
3644 }
3645}
3646
3647static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3648 struct qedr_cq *cq, int num_entries,
3649 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3650 int *update)
3651{
3652 int cnt;
3653
3654 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3655 cnt = process_resp_flush(qp, cq, num_entries, wc,
3656 resp->rq_cons);
3657 try_consume_resp_cqe(cq, qp, resp, update);
3658 } else {
3659 cnt = process_resp_one(dev, qp, cq, wc, resp);
3660 consume_cqe(cq);
3661 *update |= 1;
3662 }
3663
3664 return cnt;
3665}
3666
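/* Requester-side counterpart of try_consume_resp_cqe(). */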
3667static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3668 struct rdma_cqe_requester *req, int *update)
3669{
3670 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3671 consume_cqe(cq);
3672 *update |= 1;
3673 }
3674}
3675
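/* Poll up to num_entries completions from the CQ. GSI CQs are served by the
 * dedicated GSI path; otherwise walk the CQE chain, dispatch requester and
 * responder CQEs to the handlers above and ring the consumer doorbell if any
 * CQE was consumed.
 */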
3676int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3677{
3678 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3679 struct qedr_cq *cq = get_qedr_cq(ibcq);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003680 union rdma_cqe *cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003681 u32 old_cons, new_cons;
3682 unsigned long flags;
3683 int update = 0;
3684 int done = 0;
3685
Amrani, Ram4dd72632017-04-27 13:35:34 +03003686 if (cq->destroyed) {
3687 DP_ERR(dev,
3688 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3689 cq, cq->icid);
3690 return 0;
3691 }
3692
Ram Amrani04886772016-10-10 13:15:38 +03003693 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3694 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3695
Ram Amraniafa0e132016-10-10 13:15:36 +03003696 spin_lock_irqsave(&cq->cq_lock, flags);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003697 cqe = cq->latest_cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003698 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3699 while (num_entries && is_valid_cqe(cq, cqe)) {
3700 struct qedr_qp *qp;
3701 int cnt = 0;
3702
3703 /* prevent speculative reads of any field of CQE */
3704 rmb();
3705
3706 qp = cqe_get_qp(cqe);
3707 if (!qp) {
3708 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3709 break;
3710 }
3711
3712 wc->qp = &qp->ibqp;
3713
3714 switch (cqe_get_type(cqe)) {
3715 case RDMA_CQE_TYPE_REQUESTER:
3716 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3717 &cqe->req);
3718 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3719 break;
3720 case RDMA_CQE_TYPE_RESPONDER_RQ:
3721 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3722 &cqe->resp, &update);
3723 break;
3724 case RDMA_CQE_TYPE_INVALID:
3725 default:
3726 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3727 cqe_get_type(cqe));
3728 }
3729 num_entries -= cnt;
3730 wc += cnt;
3731 done += cnt;
3732
3733 cqe = get_cqe(cq);
3734 }
3735 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3736
3737 cq->cq_cons += new_cons - old_cons;
3738
3739 if (update)
3740 		/* The doorbell notifies about the latest VALID entry,
3741 		 * but the chain already points to the next INVALID one.
3742 		 */
3743 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3744
3745 spin_unlock_irqrestore(&cq->cq_lock, flags);
3746 return done;
3747}
Ram Amrani993d1b52016-10-10 13:15:39 +03003748
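/* MADs are not processed by the qedr driver; log the incoming MAD header for
 * debugging and report success.
 */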
3749int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3750 u8 port_num,
3751 const struct ib_wc *in_wc,
3752 const struct ib_grh *in_grh,
3753 const struct ib_mad_hdr *mad_hdr,
3754 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3755 size_t *out_mad_size, u16 *out_mad_pkey_index)
3756{
3757 struct qedr_dev *dev = get_qedr_dev(ibdev);
3758
3759 DP_DEBUG(dev, QEDR_MSG_GSI,
3760 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3761 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3762 mad_hdr->class_specific, mad_hdr->class_version,
3763 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3764 return IB_MAD_RESULT_SUCCESS;
3765}