/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55
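/* Clamp the response copy to the size of the user's output buffer
 * (udata->outlen) so a response structure larger than what the
 * application provided does not overflow it.
 */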
Amrani, Ramc75d3ec2017-06-26 19:05:04 +030056static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
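/* RoCE exposes only a single, default P_Key; just validate the index. */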
Ram Amrania7efd772016-10-10 13:15:33 +030064int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65{
66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
67 return -EINVAL;
68
69 *pkey = QEDR_ROCE_PKEY_DEFAULT;
70 return 0;
71}
72
Kalderon, Michale6a38c52017-07-26 14:41:52 +030073int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
74 int index, union ib_gid *sgid)
75{
76 struct qedr_dev *dev = get_qedr_dev(ibdev);
77
78 memset(sgid->raw, 0, sizeof(sgid->raw));
79 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
80
81 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
82 sgid->global.interface_id, sgid->global.subnet_prefix);
83
84 return 0;
85}
86
Ram Amraniac1b36e2016-10-10 13:15:32 +030087int qedr_query_device(struct ib_device *ibdev,
88 struct ib_device_attr *attr, struct ib_udata *udata)
89{
90 struct qedr_dev *dev = get_qedr_dev(ibdev);
91 struct qedr_device_attr *qattr = &dev->attr;
92
93 if (!dev->rdma_ctx) {
94 DP_ERR(dev,
95 "qedr_query_device called with invalid params rdma_ctx=%p\n",
96 dev->rdma_ctx);
97 return -EINVAL;
98 }
99
100 memset(attr, 0, sizeof(*attr));
101
102 attr->fw_ver = qattr->fw_ver;
103 attr->sys_image_guid = qattr->sys_image_guid;
104 attr->max_mr_size = qattr->max_mr_size;
105 attr->page_size_cap = qattr->page_size_caps;
106 attr->vendor_id = qattr->vendor_id;
107 attr->vendor_part_id = qattr->vendor_part_id;
108 attr->hw_ver = qattr->hw_ver;
109 attr->max_qp = qattr->max_qp;
110 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
111 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
112 IB_DEVICE_RC_RNR_NAK_GEN |
113 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
114
115 attr->max_sge = qattr->max_sge;
116 attr->max_sge_rd = qattr->max_sge;
117 attr->max_cq = qattr->max_cq;
118 attr->max_cqe = qattr->max_cqe;
119 attr->max_mr = qattr->max_mr;
120 attr->max_mw = qattr->max_mw;
121 attr->max_pd = qattr->max_pd;
122 attr->atomic_cap = dev->atomic_cap;
123 attr->max_fmr = qattr->max_fmr;
124 attr->max_map_per_fmr = 16;
125 attr->max_qp_init_rd_atom =
126 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
127 attr->max_qp_rd_atom =
128 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
129 attr->max_qp_init_rd_atom);
130
131 attr->max_srq = qattr->max_srq;
132 attr->max_srq_sge = qattr->max_srq_sge;
133 attr->max_srq_wr = qattr->max_srq_wr;
134
135 attr->local_ca_ack_delay = qattr->dev_ack_delay;
136 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
137 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
138 attr->max_ah = qattr->max_ah;
139
140 return 0;
141}
142
143#define QEDR_SPEED_SDR (1)
144#define QEDR_SPEED_DDR (2)
145#define QEDR_SPEED_QDR (4)
146#define QEDR_SPEED_FDR10 (8)
147#define QEDR_SPEED_FDR (16)
148#define QEDR_SPEED_EDR (32)
149
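/* Translate the Ethernet link speed (in Mbps) reported by the qed core
 * into the closest matching IB speed/width pair.
 */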
150static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
151 u8 *ib_width)
152{
153 switch (speed) {
154 case 1000:
155 *ib_speed = QEDR_SPEED_SDR;
156 *ib_width = IB_WIDTH_1X;
157 break;
158 case 10000:
159 *ib_speed = QEDR_SPEED_QDR;
160 *ib_width = IB_WIDTH_1X;
161 break;
162
163 case 20000:
164 *ib_speed = QEDR_SPEED_DDR;
165 *ib_width = IB_WIDTH_4X;
166 break;
167
168 case 25000:
169 *ib_speed = QEDR_SPEED_EDR;
170 *ib_width = IB_WIDTH_1X;
171 break;
172
173 case 40000:
174 *ib_speed = QEDR_SPEED_QDR;
175 *ib_width = IB_WIDTH_4X;
176 break;
177
178 case 50000:
179 *ib_speed = QEDR_SPEED_QDR;
180 *ib_width = IB_WIDTH_4X;
181 break;
182
183 case 100000:
184 *ib_speed = QEDR_SPEED_EDR;
185 *ib_width = IB_WIDTH_4X;
186 break;
187
188 default:
189 /* Unsupported */
190 *ib_speed = QEDR_SPEED_SDR;
191 *ib_width = IB_WIDTH_1X;
192 }
193}
194
195int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
196{
197 struct qedr_dev *dev;
198 struct qed_rdma_port *rdma_port;
199
200 dev = get_qedr_dev(ibdev);
201 if (port > 1) {
202 DP_ERR(dev, "invalid_port=0x%x\n", port);
203 return -EINVAL;
204 }
205
206 if (!dev->rdma_ctx) {
207 DP_ERR(dev, "rdma_ctx is NULL\n");
208 return -EINVAL;
209 }
210
211 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
Ram Amraniac1b36e2016-10-10 13:15:32 +0300212
	/* *attr is zeroed by the caller; avoid zeroing it here */
Ram Amraniac1b36e2016-10-10 13:15:32 +0300214 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
215 attr->state = IB_PORT_ACTIVE;
216 attr->phys_state = 5;
217 } else {
218 attr->state = IB_PORT_DOWN;
219 attr->phys_state = 3;
220 }
221 attr->max_mtu = IB_MTU_4096;
222 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
223 attr->lid = 0;
224 attr->lmc = 0;
225 attr->sm_lid = 0;
226 attr->sm_sl = 0;
227 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
Kalderon, Michalf5b1b172017-07-26 14:41:53 +0300228 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
229 attr->gid_tbl_len = 1;
230 attr->pkey_tbl_len = 1;
231 } else {
232 attr->gid_tbl_len = QEDR_MAX_SGID;
233 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
234 }
Ram Amraniac1b36e2016-10-10 13:15:32 +0300235 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
236 attr->qkey_viol_cntr = 0;
237 get_link_speed_and_width(rdma_port->link_speed,
238 &attr->active_speed, &attr->active_width);
239 attr->max_msg_sz = rdma_port->max_msg_size;
240 attr->max_vl_num = 4;
241
242 return 0;
243}
244
245int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
246 struct ib_port_modify *props)
247{
248 struct qedr_dev *dev;
249
250 dev = get_qedr_dev(ibdev);
251 if (port > 1) {
252 DP_ERR(dev, "invalid_port=0x%x\n", port);
253 return -EINVAL;
254 }
255
256 return 0;
257}
258
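/* Record a physical region handed out to this user context so that
 * qedr_mmap() can later validate the requested offset and length.
 */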
259static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
260 unsigned long len)
261{
262 struct qedr_mm *mm;
263
264 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
265 if (!mm)
266 return -ENOMEM;
267
268 mm->key.phy_addr = phy_addr;
269 /* This function might be called with a length which is not a multiple
270 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
271 * forces this granularity by increasing the requested size if needed.
272 * When qedr_mmap is called, it will search the list with the updated
273 * length as a key. To prevent search failures, the length is rounded up
274 * in advance to PAGE_SIZE.
275 */
276 mm->key.len = roundup(len, PAGE_SIZE);
277 INIT_LIST_HEAD(&mm->entry);
278
279 mutex_lock(&uctx->mm_list_lock);
280 list_add(&mm->entry, &uctx->mm_head);
281 mutex_unlock(&uctx->mm_list_lock);
282
283 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
284 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
285 (unsigned long long)mm->key.phy_addr,
286 (unsigned long)mm->key.len, uctx);
287
288 return 0;
289}
290
291static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
292 unsigned long len)
293{
294 bool found = false;
295 struct qedr_mm *mm;
296
297 mutex_lock(&uctx->mm_list_lock);
298 list_for_each_entry(mm, &uctx->mm_head, entry) {
299 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
300 continue;
301
302 found = true;
303 break;
304 }
305 mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 (unsigned long long)phy_addr, len, uctx, found);
309
310 return found;
311}
312
313struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
314 struct ib_udata *udata)
315{
316 int rc;
317 struct qedr_ucontext *ctx;
318 struct qedr_alloc_ucontext_resp uresp;
319 struct qedr_dev *dev = get_qedr_dev(ibdev);
320 struct qed_rdma_add_user_out_params oparams;
321
322 if (!udata)
323 return ERR_PTR(-EFAULT);
324
325 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
326 if (!ctx)
327 return ERR_PTR(-ENOMEM);
328
329 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
330 if (rc) {
331 DP_ERR(dev,
332 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
333 rc);
334 goto err;
335 }
336
337 ctx->dpi = oparams.dpi;
338 ctx->dpi_addr = oparams.dpi_addr;
339 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
340 ctx->dpi_size = oparams.dpi_size;
341 INIT_LIST_HEAD(&ctx->mm_head);
342 mutex_init(&ctx->mm_list_lock);
343
344 memset(&uresp, 0, sizeof(uresp));
345
Amrani, Ramad84dad2017-06-26 19:05:05 +0300346 uresp.dpm_enabled = dev->user_dpm_enabled;
Amrani, Ram67cbe352017-06-26 19:05:06 +0300347 uresp.wids_enabled = 1;
348 uresp.wid_count = oparams.wid_count;
Ram Amraniac1b36e2016-10-10 13:15:32 +0300349 uresp.db_pa = ctx->dpi_phys_addr;
350 uresp.db_size = ctx->dpi_size;
351 uresp.max_send_wr = dev->attr.max_sqe;
352 uresp.max_recv_wr = dev->attr.max_rqe;
353 uresp.max_srq_wr = dev->attr.max_srq_wr;
354 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
355 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
356 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
357 uresp.max_cqes = QEDR_MAX_CQES;
358
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300359 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amraniac1b36e2016-10-10 13:15:32 +0300360 if (rc)
361 goto err;
362
363 ctx->dev = dev;
364
365 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
366 if (rc)
367 goto err;
368
369 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
370 &ctx->ibucontext);
371 return &ctx->ibucontext;
372
373err:
374 kfree(ctx);
375 return ERR_PTR(rc);
376}
377
378int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
379{
380 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
381 struct qedr_mm *mm, *tmp;
382 int status = 0;
383
384 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
385 uctx);
386 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
387
388 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
389 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
390 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
391 mm->key.phy_addr, mm->key.len, uctx);
392 list_del(&mm->entry);
393 kfree(mm);
394 }
395
396 kfree(uctx);
397 return status;
398}
399
400int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
401{
402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
403 struct qedr_dev *dev = get_qedr_dev(context->device);
404 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
405 u64 unmapped_db = dev->db_phys_addr;
406 unsigned long len = (vma->vm_end - vma->vm_start);
407 int rc = 0;
408 bool found;
409
410 DP_DEBUG(dev, QEDR_MSG_INIT,
411 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
412 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
413 if (vma->vm_start & (PAGE_SIZE - 1)) {
414 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
415 vma->vm_start);
416 return -EINVAL;
417 }
418
419 found = qedr_search_mmap(ucontext, vm_page, len);
420 if (!found) {
421 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
422 vma->vm_pgoff);
423 return -EINVAL;
424 }
425
426 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
427
428 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
429 dev->db_size))) {
430 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
431 if (vma->vm_flags & VM_READ) {
432 DP_ERR(dev, "Trying to map doorbell bar for read\n");
433 return -EPERM;
434 }
435
436 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
437
438 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
439 PAGE_SIZE, vma->vm_page_prot);
440 } else {
441 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
442 rc = remap_pfn_range(vma, vma->vm_start,
443 vma->vm_pgoff, len, vma->vm_page_prot);
444 }
445 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
446 return rc;
447}
Ram Amrania7efd772016-10-10 13:15:33 +0300448
449struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
450 struct ib_ucontext *context, struct ib_udata *udata)
451{
452 struct qedr_dev *dev = get_qedr_dev(ibdev);
Ram Amrania7efd772016-10-10 13:15:33 +0300453 struct qedr_pd *pd;
454 u16 pd_id;
455 int rc;
456
457 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
458 (udata && context) ? "User Lib" : "Kernel");
459
460 if (!dev->rdma_ctx) {
Colin Ian King847cb1a2017-08-24 09:25:53 +0100461 DP_ERR(dev, "invalid RDMA context\n");
Ram Amrania7efd772016-10-10 13:15:33 +0300462 return ERR_PTR(-EINVAL);
463 }
464
465 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
466 if (!pd)
467 return ERR_PTR(-ENOMEM);
468
Ram Amrani9c1e0222017-01-24 13:51:42 +0200469 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
470 if (rc)
471 goto err;
Ram Amrania7efd772016-10-10 13:15:33 +0300472
Ram Amrania7efd772016-10-10 13:15:33 +0300473 pd->pd_id = pd_id;
474
475 if (udata && context) {
Ram Amrani9c1e0222017-01-24 13:51:42 +0200476 struct qedr_alloc_pd_uresp uresp;
477
478 uresp.pd_id = pd_id;
479
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300480 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrani9c1e0222017-01-24 13:51:42 +0200481 if (rc) {
Ram Amrania7efd772016-10-10 13:15:33 +0300482 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
Ram Amrani9c1e0222017-01-24 13:51:42 +0200483 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
484 goto err;
485 }
486
487 pd->uctx = get_qedr_ucontext(context);
488 pd->uctx->pd = pd;
Ram Amrania7efd772016-10-10 13:15:33 +0300489 }
490
491 return &pd->ibpd;
Ram Amrani9c1e0222017-01-24 13:51:42 +0200492
493err:
494 kfree(pd);
495 return ERR_PTR(rc);
Ram Amrania7efd772016-10-10 13:15:33 +0300496}
497
498int qedr_dealloc_pd(struct ib_pd *ibpd)
499{
500 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
501 struct qedr_pd *pd = get_qedr_pd(ibpd);
502
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100503 if (!pd) {
Ram Amrania7efd772016-10-10 13:15:33 +0300504 pr_err("Invalid PD received in dealloc_pd\n");
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100505 return -EINVAL;
506 }
Ram Amrania7efd772016-10-10 13:15:33 +0300507
508 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
509 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
510
511 kfree(pd);
512
513 return 0;
514}
515
516static void qedr_free_pbl(struct qedr_dev *dev,
517 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
518{
519 struct pci_dev *pdev = dev->pdev;
520 int i;
521
522 for (i = 0; i < pbl_info->num_pbls; i++) {
523 if (!pbl[i].va)
524 continue;
525 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
526 pbl[i].va, pbl[i].pa);
527 }
528
529 kfree(pbl);
530}
531
532#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
533#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
534
535#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
536#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
537#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
538
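/* Allocate num_pbls DMA-coherent pages for the page buffer list. For a
 * two-layered table the first page is filled with the physical addresses
 * of the remaining pages.
 */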
539static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
540 struct qedr_pbl_info *pbl_info,
541 gfp_t flags)
542{
543 struct pci_dev *pdev = dev->pdev;
544 struct qedr_pbl *pbl_table;
545 dma_addr_t *pbl_main_tbl;
546 dma_addr_t pa;
547 void *va;
548 int i;
549
550 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
551 if (!pbl_table)
552 return ERR_PTR(-ENOMEM);
553
554 for (i = 0; i < pbl_info->num_pbls; i++) {
Himanshu Jha7bced912017-12-31 18:01:03 +0530555 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
556 &pa, flags);
Ram Amrania7efd772016-10-10 13:15:33 +0300557 if (!va)
558 goto err;
559
Ram Amrania7efd772016-10-10 13:15:33 +0300560 pbl_table[i].va = va;
561 pbl_table[i].pa = pa;
562 }
563
	/* For two-layered PBLs, if we have more than one pbl we need to
	 * initialize the first one with physical pointers to all of the rest
	 */
567 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
568 for (i = 0; i < pbl_info->num_pbls - 1; i++)
569 pbl_main_tbl[i] = pbl_table[i + 1].pa;
570
571 return pbl_table;
572
573err:
574 for (i--; i >= 0; i--)
575 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
576 pbl_table[i].va, pbl_table[i].pa);
577
578 qedr_free_pbl(dev, pbl_info, pbl_table);
579
580 return ERR_PTR(-ENOMEM);
581}
582
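/* Decide between a single- and a two-layered PBL for num_pbes pages and
 * compute the resulting PBL page size and page count.
 */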
583static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
584 struct qedr_pbl_info *pbl_info,
585 u32 num_pbes, int two_layer_capable)
586{
587 u32 pbl_capacity;
588 u32 pbl_size;
589 u32 num_pbls;
590
591 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
592 if (num_pbes > MAX_PBES_TWO_LAYER) {
593 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
594 num_pbes);
595 return -EINVAL;
596 }
597
598 /* calculate required pbl page size */
599 pbl_size = MIN_FW_PBL_PAGE_SIZE;
600 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
601 NUM_PBES_ON_PAGE(pbl_size);
602
603 while (pbl_capacity < num_pbes) {
604 pbl_size *= 2;
605 pbl_capacity = pbl_size / sizeof(u64);
606 pbl_capacity = pbl_capacity * pbl_capacity;
607 }
608
609 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for layer 0 (it points to the pbls) */
611 pbl_info->two_layered = true;
612 } else {
613 /* One layered PBL */
614 num_pbls = 1;
615 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
616 roundup_pow_of_two((num_pbes * sizeof(u64))));
617 pbl_info->two_layered = false;
618 }
619
620 pbl_info->num_pbls = num_pbls;
621 pbl_info->pbl_size = pbl_size;
622 pbl_info->num_pbes = num_pbes;
623
624 DP_DEBUG(dev, QEDR_MSG_MR,
625 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
626 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
627
628 return 0;
629}
630
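/* Walk the umem scatterlist and write one PBE per firmware page. A single
 * host page may map to several firmware pages when the umem page size is
 * larger than 1 << pg_shift.
 */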
631static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
632 struct qedr_pbl *pbl,
Ram Amranie57bb6b2017-06-05 16:32:27 +0300633 struct qedr_pbl_info *pbl_info, u32 pg_shift)
Ram Amrania7efd772016-10-10 13:15:33 +0300634{
635 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300636 u32 fw_pg_cnt, fw_pg_per_umem_pg;
Ram Amrania7efd772016-10-10 13:15:33 +0300637 struct qedr_pbl *pbl_tbl;
638 struct scatterlist *sg;
639 struct regpair *pbe;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300640 u64 pg_addr;
Ram Amrania7efd772016-10-10 13:15:33 +0300641 int entry;
Ram Amrania7efd772016-10-10 13:15:33 +0300642
643 if (!pbl_info->num_pbes)
644 return;
645
	/* If we have a two-layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lies in the second pbl of the table
	 */
649 if (pbl_info->two_layered)
650 pbl_tbl = &pbl[1];
651 else
652 pbl_tbl = pbl;
653
654 pbe = (struct regpair *)pbl_tbl->va;
655 if (!pbe) {
656 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
657 return;
658 }
659
660 pbe_cnt = 0;
661
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +0300662 shift = umem->page_shift;
Ram Amrania7efd772016-10-10 13:15:33 +0300663
Ram Amranie57bb6b2017-06-05 16:32:27 +0300664 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
665
Ram Amrania7efd772016-10-10 13:15:33 +0300666 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
667 pages = sg_dma_len(sg) >> shift;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300668 pg_addr = sg_dma_address(sg);
Ram Amrania7efd772016-10-10 13:15:33 +0300669 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
Ram Amranie57bb6b2017-06-05 16:32:27 +0300670 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
671 pbe->lo = cpu_to_le32(pg_addr);
672 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
Ram Amrania7efd772016-10-10 13:15:33 +0300673
Ram Amranie57bb6b2017-06-05 16:32:27 +0300674 pg_addr += BIT(pg_shift);
675 pbe_cnt++;
676 total_num_pbes++;
677 pbe++;
Ram Amrania7efd772016-10-10 13:15:33 +0300678
Ram Amranie57bb6b2017-06-05 16:32:27 +0300679 if (total_num_pbes == pbl_info->num_pbes)
680 return;
681
682 /* If the given pbl is full storing the pbes,
683 * move to next pbl.
684 */
685 if (pbe_cnt ==
686 (pbl_info->pbl_size / sizeof(u64))) {
687 pbl_tbl++;
688 pbe = (struct regpair *)pbl_tbl->va;
689 pbe_cnt = 0;
690 }
691
692 fw_pg_cnt++;
Ram Amrania7efd772016-10-10 13:15:33 +0300693 }
694 }
695 }
696}
697
698static int qedr_copy_cq_uresp(struct qedr_dev *dev,
699 struct qedr_cq *cq, struct ib_udata *udata)
700{
701 struct qedr_create_cq_uresp uresp;
702 int rc;
703
704 memset(&uresp, 0, sizeof(uresp));
705
706 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
707 uresp.icid = cq->icid;
708
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300709 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrania7efd772016-10-10 13:15:33 +0300710 if (rc)
711 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
712
713 return rc;
714}
715
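/* Advance the CQ consumer; the toggle bit flips each time the chain wraps
 * past its last element.
 */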
716static void consume_cqe(struct qedr_cq *cq)
717{
718 if (cq->latest_cqe == cq->toggle_cqe)
719 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
720
721 cq->latest_cqe = qed_chain_consume(&cq->pbl);
722}
723
724static inline int qedr_align_cq_entries(int entries)
725{
726 u64 size, aligned_size;
727
728 /* We allocate an extra entry that we don't report to the FW. */
729 size = (entries + 1) * QEDR_CQE_SIZE;
730 aligned_size = ALIGN(size, PAGE_SIZE);
731
732 return aligned_size / QEDR_CQE_SIZE;
733}
734
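/* Pin a userspace buffer and build the PBL that describes it. When
 * alloc_and_init is not set (the iWARP flow), only a placeholder PBL entry
 * is allocated here; it is populated later from the addresses returned by
 * qed.
 */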
735static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
736 struct qedr_dev *dev,
737 struct qedr_userq *q,
738 u64 buf_addr, size_t buf_len,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300739 int access, int dmasync,
740 int alloc_and_init)
Ram Amrania7efd772016-10-10 13:15:33 +0300741{
Ram Amranie57bb6b2017-06-05 16:32:27 +0300742 u32 fw_pages;
Ram Amrania7efd772016-10-10 13:15:33 +0300743 int rc;
744
745 q->buf_addr = buf_addr;
746 q->buf_len = buf_len;
747 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
748 if (IS_ERR(q->umem)) {
749 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
750 PTR_ERR(q->umem));
751 return PTR_ERR(q->umem);
752 }
753
Ram Amranie57bb6b2017-06-05 16:32:27 +0300754 fw_pages = ib_umem_page_count(q->umem) <<
755 (q->umem->page_shift - FW_PAGE_SHIFT);
756
757 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
Ram Amrania7efd772016-10-10 13:15:33 +0300758 if (rc)
759 goto err0;
760
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300761 if (alloc_and_init) {
762 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
763 if (IS_ERR(q->pbl_tbl)) {
764 rc = PTR_ERR(q->pbl_tbl);
765 goto err0;
766 }
Ram Amranie57bb6b2017-06-05 16:32:27 +0300767 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
768 FW_PAGE_SHIFT);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300769 } else {
770 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
Dan Carpenter89fd2572017-08-25 11:18:39 +0300771 if (!q->pbl_tbl) {
772 rc = -ENOMEM;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300773 goto err0;
Dan Carpenter89fd2572017-08-25 11:18:39 +0300774 }
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300775 }
Ram Amrania7efd772016-10-10 13:15:33 +0300776
777 return 0;
778
779err0:
780 ib_umem_release(q->umem);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300781 q->umem = NULL;
Ram Amrania7efd772016-10-10 13:15:33 +0300782
783 return rc;
784}
785
786static inline void qedr_init_cq_params(struct qedr_cq *cq,
787 struct qedr_ucontext *ctx,
788 struct qedr_dev *dev, int vector,
789 int chain_entries, int page_cnt,
790 u64 pbl_ptr,
791 struct qed_rdma_create_cq_in_params
792 *params)
793{
794 memset(params, 0, sizeof(*params));
795 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
796 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
797 params->cnq_id = vector;
798 params->cq_size = chain_entries - 1;
799 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
800 params->pbl_num_pages = page_cnt;
801 params->pbl_ptr = pbl_ptr;
802 params->pbl_two_level = 0;
803}
804
805static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
806{
807 /* Flush data before signalling doorbell */
808 wmb();
809 cq->db.data.agg_flags = flags;
810 cq->db.data.value = cpu_to_le32(cons);
811 writeq(cq->db.raw, cq->db_addr);
812
813 /* Make sure write would stick */
814 mmiowb();
815}
816
817int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
818{
819 struct qedr_cq *cq = get_qedr_cq(ibcq);
820 unsigned long sflags;
Amrani, Ram4dd72632017-04-27 13:35:34 +0300821 struct qedr_dev *dev;
822
823 dev = get_qedr_dev(ibcq->device);
824
825 if (cq->destroyed) {
826 DP_ERR(dev,
827 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
828 cq, cq->icid);
829 return -EINVAL;
830 }
831
Ram Amrania7efd772016-10-10 13:15:33 +0300832
833 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
834 return 0;
835
836 spin_lock_irqsave(&cq->cq_lock, sflags);
837
838 cq->arm_flags = 0;
839
840 if (flags & IB_CQ_SOLICITED)
841 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
842
843 if (flags & IB_CQ_NEXT_COMP)
844 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
845
846 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
847
848 spin_unlock_irqrestore(&cq->cq_lock, sflags);
849
850 return 0;
851}
852
853struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
854 const struct ib_cq_init_attr *attr,
855 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
856{
857 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
858 struct qed_rdma_destroy_cq_out_params destroy_oparams;
859 struct qed_rdma_destroy_cq_in_params destroy_iparams;
860 struct qedr_dev *dev = get_qedr_dev(ibdev);
861 struct qed_rdma_create_cq_in_params params;
862 struct qedr_create_cq_ureq ureq;
863 int vector = attr->comp_vector;
864 int entries = attr->cqe;
865 struct qedr_cq *cq;
866 int chain_entries;
867 int page_cnt;
868 u64 pbl_ptr;
869 u16 icid;
870 int rc;
871
872 DP_DEBUG(dev, QEDR_MSG_INIT,
873 "create_cq: called from %s. entries=%d, vector=%d\n",
874 udata ? "User Lib" : "Kernel", entries, vector);
875
876 if (entries > QEDR_MAX_CQES) {
877 DP_ERR(dev,
878 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
879 entries, QEDR_MAX_CQES);
880 return ERR_PTR(-EINVAL);
881 }
882
883 chain_entries = qedr_align_cq_entries(entries);
884 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
885
886 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
887 if (!cq)
888 return ERR_PTR(-ENOMEM);
889
890 if (udata) {
891 memset(&ureq, 0, sizeof(ureq));
892 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
893 DP_ERR(dev,
894 "create cq: problem copying data from user space\n");
895 goto err0;
896 }
897
898 if (!ureq.len) {
899 DP_ERR(dev,
900 "create cq: cannot create a cq with 0 entries\n");
901 goto err0;
902 }
903
904 cq->cq_type = QEDR_CQ_TYPE_USER;
905
906 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300907 ureq.len, IB_ACCESS_LOCAL_WRITE,
908 1, 1);
Ram Amrania7efd772016-10-10 13:15:33 +0300909 if (rc)
910 goto err0;
911
912 pbl_ptr = cq->q.pbl_tbl->pa;
913 page_cnt = cq->q.pbl_info.num_pbes;
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200914
915 cq->ibcq.cqe = chain_entries;
Ram Amrania7efd772016-10-10 13:15:33 +0300916 } else {
917 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
918
919 rc = dev->ops->common->chain_alloc(dev->cdev,
920 QED_CHAIN_USE_TO_CONSUME,
921 QED_CHAIN_MODE_PBL,
922 QED_CHAIN_CNT_TYPE_U32,
923 chain_entries,
924 sizeof(union rdma_cqe),
Mintz, Yuval1a4a6972017-06-20 16:00:00 +0300925 &cq->pbl, NULL);
Ram Amrania7efd772016-10-10 13:15:33 +0300926 if (rc)
927 goto err1;
928
929 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
930 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200931 cq->ibcq.cqe = cq->pbl.capacity;
Ram Amrania7efd772016-10-10 13:15:33 +0300932 }
933
934 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
935 pbl_ptr, &params);
936
937 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
938 if (rc)
939 goto err2;
940
941 cq->icid = icid;
942 cq->sig = QEDR_CQ_MAGIC_NUMBER;
943 spin_lock_init(&cq->cq_lock);
944
945 if (ib_ctx) {
946 rc = qedr_copy_cq_uresp(dev, cq, udata);
947 if (rc)
948 goto err3;
949 } else {
950 /* Generate doorbell address. */
951 cq->db_addr = dev->db_addr +
952 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
953 cq->db.data.icid = cq->icid;
954 cq->db.data.params = DB_AGG_CMD_SET <<
955 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
956
957 /* point to the very last element, passing it we will toggle */
958 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
959 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
960 cq->latest_cqe = NULL;
961 consume_cqe(cq);
962 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
963 }
964
965 DP_DEBUG(dev, QEDR_MSG_CQ,
966 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
967 cq->icid, cq, params.cq_size);
968
969 return &cq->ibcq;
970
971err3:
972 destroy_iparams.icid = cq->icid;
973 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
974 &destroy_oparams);
975err2:
976 if (udata)
977 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
978 else
979 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
980err1:
981 if (udata)
982 ib_umem_release(cq->q.umem);
983err0:
984 kfree(cq);
985 return ERR_PTR(-EINVAL);
986}
987
988int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
989{
990 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
991 struct qedr_cq *cq = get_qedr_cq(ibcq);
992
993 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
994
995 return 0;
996}
997
Amrani, Ram4dd72632017-04-27 13:35:34 +0300998#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
999#define QEDR_DESTROY_CQ_ITER_DURATION (10)
1000
Ram Amrania7efd772016-10-10 13:15:33 +03001001int qedr_destroy_cq(struct ib_cq *ibcq)
1002{
1003 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1004 struct qed_rdma_destroy_cq_out_params oparams;
1005 struct qed_rdma_destroy_cq_in_params iparams;
1006 struct qedr_cq *cq = get_qedr_cq(ibcq);
Amrani, Ram4dd72632017-04-27 13:35:34 +03001007 int iter;
Amrani, Ram942b3b22017-04-27 13:35:33 +03001008 int rc;
Ram Amrania7efd772016-10-10 13:15:33 +03001009
Amrani, Ram942b3b22017-04-27 13:35:33 +03001010 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
Ram Amrania7efd772016-10-10 13:15:33 +03001011
Amrani, Ram4dd72632017-04-27 13:35:34 +03001012 cq->destroyed = 1;
1013
Ram Amrania7efd772016-10-10 13:15:33 +03001014 /* GSIs CQs are handled by driver, so they don't exist in the FW */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001015 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1016 goto done;
Amrani, Rama1211352016-12-22 14:40:34 +02001017
Amrani, Ram942b3b22017-04-27 13:35:33 +03001018 iparams.icid = cq->icid;
1019 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1020 if (rc)
1021 return rc;
1022
1023 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
Ram Amrania7efd772016-10-10 13:15:33 +03001024
1025 if (ibcq->uobject && ibcq->uobject->context) {
1026 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1027 ib_umem_release(cq->q.umem);
1028 }
1029
Amrani, Ram4dd72632017-04-27 13:35:34 +03001030 /* We don't want the IRQ handler to handle a non-existing CQ so we
1031 * wait until all CNQ interrupts, if any, are received. This will always
1032 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
1035 * in case all the completions are handled in that span. Otherwise
1036 * we sleep for a while and check again. Since the CNQ may be
1037 * associated with (only) the current CPU we use msleep to allow the
1038 * current CPU to be freed.
1039 * The CNQ notification is increased in qedr_irq_handler().
1040 */
1041 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1042 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1043 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1044 iter--;
1045 }
1046
1047 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1048 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1049 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1050 iter--;
1051 }
1052
1053 if (oparams.num_cq_notif != cq->cnq_notif)
1054 goto err;
1055
1056 /* Note that we don't need to have explicit code to wait for the
1057 * completion of the event handler because it is invoked from the EQ.
1058 * Since the destroy CQ ramrod has also been received on the EQ we can
1059 * be certain that there's no event handler in process.
1060 */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001061done:
Amrani, Ram4dd72632017-04-27 13:35:34 +03001062 cq->sig = ~cq->sig;
1063
Ram Amrania7efd772016-10-10 13:15:33 +03001064 kfree(cq);
1065
1066 return 0;
Amrani, Ram4dd72632017-04-27 13:35:34 +03001067
1068err:
1069 DP_ERR(dev,
1070 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1071 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1072
1073 return -EINVAL;
Ram Amrania7efd772016-10-10 13:15:33 +03001074}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001075
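/* Resolve the SGID entry referenced by the AH attribute into the RoCE
 * addressing fields (sgid/dgid, vlan and RoCE v1/v2 mode) of the
 * modify-QP parameters.
 */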
1076static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1077 struct ib_qp_attr *attr,
1078 int attr_mask,
1079 struct qed_rdma_modify_qp_in_params
1080 *qp_params)
1081{
1082 enum rdma_network_type nw_type;
1083 struct ib_gid_attr gid_attr;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001084 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001085 union ib_gid gid;
1086 u32 ipv4_addr;
1087 int rc = 0;
1088 int i;
1089
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001090 rc = ib_get_cached_gid(ibqp->device,
1091 rdma_ah_get_port_num(&attr->ah_attr),
1092 grh->sgid_index, &gid, &gid_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001093 if (rc)
1094 return rc;
1095
Ram Amranicecbcdd2016-10-10 13:15:34 +03001096 if (gid_attr.ndev) {
1097 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1098
1099 dev_put(gid_attr.ndev);
1100 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1101 switch (nw_type) {
1102 case RDMA_NETWORK_IPV6:
1103 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1104 sizeof(qp_params->sgid));
1105 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001106 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001107 sizeof(qp_params->dgid));
1108 qp_params->roce_mode = ROCE_V2_IPV6;
1109 SET_FIELD(qp_params->modify_flags,
1110 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1111 break;
1112 case RDMA_NETWORK_IB:
1113 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1114 sizeof(qp_params->sgid));
1115 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001116 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001117 sizeof(qp_params->dgid));
1118 qp_params->roce_mode = ROCE_V1;
1119 break;
1120 case RDMA_NETWORK_IPV4:
1121 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1122 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1123 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1124 qp_params->sgid.ipv4_addr = ipv4_addr;
1125 ipv4_addr =
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001126 qedr_get_ipv4_from_gid(grh->dgid.raw);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001127 qp_params->dgid.ipv4_addr = ipv4_addr;
1128 SET_FIELD(qp_params->modify_flags,
1129 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1130 qp_params->roce_mode = ROCE_V2_IPV4;
1131 break;
1132 }
1133 }
1134
1135 for (i = 0; i < 4; i++) {
1136 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1137 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1138 }
1139
1140 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1141 qp_params->vlan_id = 0;
1142
1143 return 0;
1144}
1145
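/* Validate the requested QP capabilities against the device limits; only
 * RC and GSI QP types are supported.
 */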
Ram Amranicecbcdd2016-10-10 13:15:34 +03001146static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1147 struct ib_qp_init_attr *attrs)
1148{
1149 struct qedr_device_attr *qattr = &dev->attr;
1150
1151 /* QP0... attrs->qp_type == IB_QPT_GSI */
1152 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1153 DP_DEBUG(dev, QEDR_MSG_QP,
1154 "create qp: unsupported qp type=0x%x requested\n",
1155 attrs->qp_type);
1156 return -EINVAL;
1157 }
1158
1159 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1160 DP_ERR(dev,
1161 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1162 attrs->cap.max_send_wr, qattr->max_sqe);
1163 return -EINVAL;
1164 }
1165
1166 if (attrs->cap.max_inline_data > qattr->max_inline) {
1167 DP_ERR(dev,
1168 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1169 attrs->cap.max_inline_data, qattr->max_inline);
1170 return -EINVAL;
1171 }
1172
1173 if (attrs->cap.max_send_sge > qattr->max_sge) {
1174 DP_ERR(dev,
1175 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1176 attrs->cap.max_send_sge, qattr->max_sge);
1177 return -EINVAL;
1178 }
1179
1180 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1181 DP_ERR(dev,
1182 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1183 attrs->cap.max_recv_sge, qattr->max_sge);
1184 return -EINVAL;
1185 }
1186
1187 /* Unprivileged user space cannot create special QP */
1188 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1189 DP_ERR(dev,
1190 "create qp: userspace can't create special QPs of type=0x%x\n",
1191 attrs->qp_type);
1192 return -EINVAL;
1193 }
1194
1195 return 0;
1196}
1197
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001198static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1199 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001200 struct qedr_qp *qp)
1201{
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001202 /* iWARP requires two doorbells per RQ. */
1203 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1204 uresp->rq_db_offset =
1205 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1206 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1207 } else {
1208 uresp->rq_db_offset =
1209 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1210 }
1211
Ram Amranicecbcdd2016-10-10 13:15:34 +03001212 uresp->rq_icid = qp->icid;
1213}
1214
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001215static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1216 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001217 struct qedr_qp *qp)
1218{
1219 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001220
1221 /* iWARP uses the same cid for rq and sq */
1222 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1223 uresp->sq_icid = qp->icid;
1224 else
1225 uresp->sq_icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001226}
1227
1228static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1229 struct qedr_qp *qp, struct ib_udata *udata)
1230{
1231 struct qedr_create_qp_uresp uresp;
1232 int rc;
1233
1234 memset(&uresp, 0, sizeof(uresp));
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001235 qedr_copy_sq_uresp(dev, &uresp, qp);
1236 qedr_copy_rq_uresp(dev, &uresp, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001237
1238 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1239 uresp.qp_id = qp->qp_id;
1240
Amrani, Ramc75d3ec2017-06-26 19:05:04 +03001241 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amranicecbcdd2016-10-10 13:15:34 +03001242 if (rc)
1243 DP_ERR(dev,
1244 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1245 qp->icid);
1246
1247 return rc;
1248}
1249
Amrani, Ramdf158562016-12-22 14:52:24 +02001250static void qedr_set_common_qp_params(struct qedr_dev *dev,
1251 struct qedr_qp *qp,
1252 struct qedr_pd *pd,
1253 struct ib_qp_init_attr *attrs)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001254{
Ram Amranicecbcdd2016-10-10 13:15:34 +03001255 spin_lock_init(&qp->q_lock);
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001256 atomic_set(&qp->refcnt, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001257 qp->pd = pd;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001258 qp->qp_type = attrs->qp_type;
1259 qp->max_inline_data = attrs->cap.max_inline_data;
1260 qp->sq.max_sges = attrs->cap.max_send_sge;
1261 qp->state = QED_ROCE_QP_STATE_RESET;
1262 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1263 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1264 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1265 qp->dev = dev;
Amrani, Ramdf158562016-12-22 14:52:24 +02001266 qp->rq.max_sges = attrs->cap.max_recv_sge;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001267
1268 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001269 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1270 qp->rq.max_sges, qp->rq_cq->icid);
1271 DP_DEBUG(dev, QEDR_MSG_QP,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001272 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1273 pd->pd_id, qp->qp_type, qp->max_inline_data,
1274 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1275 DP_DEBUG(dev, QEDR_MSG_QP,
1276 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1277 qp->sq.max_sges, qp->sq_cq->icid);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001278}
1279
Amrani, Ramdf158562016-12-22 14:52:24 +02001280static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001281{
1282 qp->sq.db = dev->db_addr +
1283 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1284 qp->sq.db_data.data.icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001285 qp->rq.db = dev->db_addr +
1286 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1287 qp->rq.db_data.data.icid = qp->icid;
1288}
1289
Amrani, Ramdf158562016-12-22 14:52:24 +02001290static inline void
1291qedr_init_common_qp_in_params(struct qedr_dev *dev,
1292 struct qedr_pd *pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001293 struct qedr_qp *qp,
1294 struct ib_qp_init_attr *attrs,
Amrani, Ramdf158562016-12-22 14:52:24 +02001295 bool fmr_and_reserved_lkey,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001296 struct qed_rdma_create_qp_in_params *params)
1297{
Amrani, Ramdf158562016-12-22 14:52:24 +02001298 /* QP handle to be written in an async event */
1299 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1300 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001301
Amrani, Ramdf158562016-12-22 14:52:24 +02001302 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1303 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1304 params->pd = pd->pd_id;
1305 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1306 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1307 params->stats_queue = 0;
1308 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1309 params->srq_id = 0;
1310 params->use_srq = false;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001311}
1312
Amrani, Ramdf158562016-12-22 14:52:24 +02001313static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001314{
Amrani, Ramdf158562016-12-22 14:52:24 +02001315 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1316 "qp=%p. "
1317 "sq_addr=0x%llx, "
1318 "sq_len=%zd, "
1319 "rq_addr=0x%llx, "
1320 "rq_len=%zd"
1321 "\n",
1322 qp,
1323 qp->usq.buf_addr,
1324 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1325}
1326
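/* iWARP only: track the QP in an idr so it can later be looked up by its
 * QP id.
 */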
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001327static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
1328{
1329 int rc;
1330
1331 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1332 return 0;
1333
1334 idr_preload(GFP_KERNEL);
1335 spin_lock_irq(&dev->idr_lock);
1336
1337 rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
1338
1339 spin_unlock_irq(&dev->idr_lock);
1340 idr_preload_end();
1341
1342 return rc < 0 ? rc : 0;
1343}
1344
1345static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
1346{
1347 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1348 return;
1349
1350 spin_lock_irq(&dev->idr_lock);
1351 idr_remove(&dev->qpidr, id);
1352 spin_unlock_irq(&dev->idr_lock);
1353}
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001354
1355static inline void
1356qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1357 struct qedr_qp *qp,
1358 struct qed_rdma_create_qp_out_params *out_params)
1359{
1360 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1361 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1362
1363 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1364 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1365
1366 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1367 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1368
1369 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1370 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1371}
1372
Amrani, Ramdf158562016-12-22 14:52:24 +02001373static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1374{
1375 if (qp->usq.umem)
1376 ib_umem_release(qp->usq.umem);
1377 qp->usq.umem = NULL;
1378
1379 if (qp->urq.umem)
1380 ib_umem_release(qp->urq.umem);
1381 qp->urq.umem = NULL;
1382}
1383
1384static int qedr_create_user_qp(struct qedr_dev *dev,
1385 struct qedr_qp *qp,
1386 struct ib_pd *ibpd,
1387 struct ib_udata *udata,
1388 struct ib_qp_init_attr *attrs)
1389{
1390 struct qed_rdma_create_qp_in_params in_params;
1391 struct qed_rdma_create_qp_out_params out_params;
1392 struct qedr_pd *pd = get_qedr_pd(ibpd);
1393 struct ib_ucontext *ib_ctx = NULL;
Amrani, Ramdf158562016-12-22 14:52:24 +02001394 struct qedr_create_qp_ureq ureq;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001395 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001396 int rc = -EINVAL;
1397
1398 ib_ctx = ibpd->uobject->context;
Amrani, Ramdf158562016-12-22 14:52:24 +02001399
1400 memset(&ureq, 0, sizeof(ureq));
1401 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1402 if (rc) {
1403 DP_ERR(dev, "Problem copying data from user space\n");
1404 return rc;
1405 }
1406
1407 /* SQ - read access only (0), dma sync not required (0) */
1408 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001409 ureq.sq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001410 if (rc)
1411 return rc;
1412
1413 /* RQ - read access only (0), dma sync not required (0) */
1414 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001415 ureq.rq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001416 if (rc)
1417 return rc;
1418
1419 memset(&in_params, 0, sizeof(in_params));
1420 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1421 in_params.qp_handle_lo = ureq.qp_handle_lo;
1422 in_params.qp_handle_hi = ureq.qp_handle_hi;
1423 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1424 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1425 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1426 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1427
1428 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1429 &in_params, &out_params);
1430
1431 if (!qp->qed_qp) {
1432 rc = -ENOMEM;
1433 goto err1;
1434 }
1435
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001436 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1437 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1438
Amrani, Ramdf158562016-12-22 14:52:24 +02001439 qp->qp_id = out_params.qp_id;
1440 qp->icid = out_params.icid;
1441
1442 rc = qedr_copy_qp_uresp(dev, qp, udata);
1443 if (rc)
1444 goto err;
1445
1446 qedr_qp_user_print(dev, qp);
1447
1448 return 0;
1449err:
1450 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1451 if (rc)
1452 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1453
1454err1:
1455 qedr_cleanup_user(dev, qp);
1456 return rc;
1457}
1458
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001459static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1460{
1461 qp->sq.db = dev->db_addr +
1462 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1463 qp->sq.db_data.data.icid = qp->icid;
1464
1465 qp->rq.db = dev->db_addr +
1466 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1467 qp->rq.db_data.data.icid = qp->icid;
1468 qp->rq.iwarp_db2 = dev->db_addr +
1469 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1470 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1471 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1472}
1473
Amrani, Ramdf158562016-12-22 14:52:24 +02001474static int
1475qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1476 struct qedr_qp *qp,
1477 struct qed_rdma_create_qp_in_params *in_params,
1478 u32 n_sq_elems, u32 n_rq_elems)
1479{
1480 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001481 int rc;
1482
Ram Amranicecbcdd2016-10-10 13:15:34 +03001483 rc = dev->ops->common->chain_alloc(dev->cdev,
1484 QED_CHAIN_USE_TO_PRODUCE,
1485 QED_CHAIN_MODE_PBL,
1486 QED_CHAIN_CNT_TYPE_U32,
1487 n_sq_elems,
1488 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001489 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001490
1491 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001492 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001493
Amrani, Ramdf158562016-12-22 14:52:24 +02001494 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1495 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001496
Ram Amranicecbcdd2016-10-10 13:15:34 +03001497 rc = dev->ops->common->chain_alloc(dev->cdev,
1498 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1499 QED_CHAIN_MODE_PBL,
1500 QED_CHAIN_CNT_TYPE_U32,
1501 n_rq_elems,
1502 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001503 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001504 if (rc)
1505 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001506
Amrani, Ramdf158562016-12-22 14:52:24 +02001507 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1508 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001509
Amrani, Ramdf158562016-12-22 14:52:24 +02001510 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1511 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001512
Amrani, Ramdf158562016-12-22 14:52:24 +02001513 if (!qp->qed_qp)
1514 return -EINVAL;
1515
1516 qp->qp_id = out_params.qp_id;
1517 qp->icid = out_params.icid;
1518
1519 qedr_set_roce_db_info(dev, qp);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001520 return rc;
1521}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001522
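/* The iWARP flow is the reverse of the RoCE one: qed creates the QP first
 * and returns the PBL memory, and the SQ/RQ chains are then allocated on
 * top of it via ext_pbl.
 */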
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001523static int
1524qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1525 struct qedr_qp *qp,
1526 struct qed_rdma_create_qp_in_params *in_params,
1527 u32 n_sq_elems, u32 n_rq_elems)
1528{
1529 struct qed_rdma_create_qp_out_params out_params;
1530 struct qed_chain_ext_pbl ext_pbl;
1531 int rc;
1532
1533 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1534 QEDR_SQE_ELEMENT_SIZE,
1535 QED_CHAIN_MODE_PBL);
1536 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1537 QEDR_RQE_ELEMENT_SIZE,
1538 QED_CHAIN_MODE_PBL);
1539
1540 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1541 in_params, &out_params);
1542
1543 if (!qp->qed_qp)
1544 return -EINVAL;
1545
1546 /* Now we allocate the chain */
1547 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1548 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1549
1550 rc = dev->ops->common->chain_alloc(dev->cdev,
1551 QED_CHAIN_USE_TO_PRODUCE,
1552 QED_CHAIN_MODE_PBL,
1553 QED_CHAIN_CNT_TYPE_U32,
1554 n_sq_elems,
1555 QEDR_SQE_ELEMENT_SIZE,
1556 &qp->sq.pbl, &ext_pbl);
1557
1558 if (rc)
1559 goto err;
1560
1561 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1562 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1563
1564 rc = dev->ops->common->chain_alloc(dev->cdev,
1565 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1566 QED_CHAIN_MODE_PBL,
1567 QED_CHAIN_CNT_TYPE_U32,
1568 n_rq_elems,
1569 QEDR_RQE_ELEMENT_SIZE,
1570 &qp->rq.pbl, &ext_pbl);
1571
1572 if (rc)
1573 goto err;
1574
1575 qp->qp_id = out_params.qp_id;
1576 qp->icid = out_params.icid;
1577
1578 qedr_set_iwarp_db_info(dev, qp);
1579 return rc;
1580
1581err:
1582 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1583
1584 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001585}
1586
Amrani, Ramdf158562016-12-22 14:52:24 +02001587static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001588{
Amrani, Ramdf158562016-12-22 14:52:24 +02001589 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1590 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001591
Amrani, Ramdf158562016-12-22 14:52:24 +02001592 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1593 kfree(qp->rqe_wr_id);
1594}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001595
Amrani, Ramdf158562016-12-22 14:52:24 +02001596static int qedr_create_kernel_qp(struct qedr_dev *dev,
1597 struct qedr_qp *qp,
1598 struct ib_pd *ibpd,
1599 struct ib_qp_init_attr *attrs)
1600{
1601 struct qed_rdma_create_qp_in_params in_params;
1602 struct qedr_pd *pd = get_qedr_pd(ibpd);
1603 int rc = -EINVAL;
1604 u32 n_rq_elems;
1605 u32 n_sq_elems;
1606 u32 n_sq_entries;
1607
1608 memset(&in_params, 0, sizeof(in_params));
1609
1610 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1611 * the ring. The ring should allow at least a single WR, even if the
1612 * user requested none, due to allocation issues.
1613 * We should add an extra WR since the prod and cons indices of
1614 * wqe_wr_id are managed in such a way that the WQ is considered full
1615 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far more
1617 * WRs than indicated. If we decline its ib_post_send() then we get
1618 * error prints in the dmesg we'd like to avoid.
1619 */
1620 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1621 dev->attr.max_sqe);
1622
1623 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1624 GFP_KERNEL);
1625 if (!qp->wqe_wr_id) {
1626 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1627 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001628 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001629
Amrani, Ramdf158562016-12-22 14:52:24 +02001630 /* QP handle to be written in CQE */
1631 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1632 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001633
Amrani, Ramdf158562016-12-22 14:52:24 +02001634 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
1636 * user requested none, due to allocation issues.
1637 */
1638 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1639
1640 /* Allocate driver internal RQ array */
1641 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1642 GFP_KERNEL);
1643 if (!qp->rqe_wr_id) {
1644 DP_ERR(dev,
1645 "create qp: failed RQ shadow memory allocation\n");
1646 kfree(qp->wqe_wr_id);
1647 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001648 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001649
Amrani, Ramdf158562016-12-22 14:52:24 +02001650 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001651
Amrani, Ramdf158562016-12-22 14:52:24 +02001652 n_sq_entries = attrs->cap.max_send_wr;
1653 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1654 n_sq_entries = max_t(u32, n_sq_entries, 1);
1655 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001656
Amrani, Ramdf158562016-12-22 14:52:24 +02001657 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1658
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001659 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1660 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1661 n_sq_elems, n_rq_elems);
1662 else
1663 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1664 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001665 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001666 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001667
1668 return rc;
1669}
1670
1671struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1672 struct ib_qp_init_attr *attrs,
1673 struct ib_udata *udata)
1674{
1675 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001676 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001677 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001678 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001679 int rc = 0;
1680
1681 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1682 udata ? "user library" : "kernel", pd);
1683
1684 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1685 if (rc)
1686 return ERR_PTR(rc);
1687
Wei Yongjun181d8012016-10-28 16:33:47 +00001688 if (attrs->srq)
1689 return ERR_PTR(-EINVAL);
1690
Ram Amranicecbcdd2016-10-10 13:15:34 +03001691 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001692		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1693 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001694 get_qedr_cq(attrs->send_cq),
1695 get_qedr_cq(attrs->send_cq)->icid,
1696 get_qedr_cq(attrs->recv_cq),
1697 get_qedr_cq(attrs->recv_cq)->icid);
1698
Amrani, Ramdf158562016-12-22 14:52:24 +02001699 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1700 if (!qp) {
1701 DP_ERR(dev, "create qp: failed allocating memory\n");
1702 return ERR_PTR(-ENOMEM);
1703 }
1704
1705 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001706
Ram Amrani04886772016-10-10 13:15:38 +03001707 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001708 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1709 if (IS_ERR(ibqp))
1710 kfree(qp);
1711 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001712 }
1713
Amrani, Ramdf158562016-12-22 14:52:24 +02001714 if (udata)
1715 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1716 else
1717 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001718
Amrani, Ramdf158562016-12-22 14:52:24 +02001719 if (rc)
1720 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001721
Ram Amranicecbcdd2016-10-10 13:15:34 +03001722 qp->ibqp.qp_num = qp->qp_id;
1723
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001724 rc = qedr_idr_add(dev, qp, qp->qp_id);
1725 if (rc)
1726 goto err;
1727
Ram Amranicecbcdd2016-10-10 13:15:34 +03001728 return &qp->ibqp;
1729
Amrani, Ramdf158562016-12-22 14:52:24 +02001730err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001731 kfree(qp);
1732
1733 return ERR_PTR(-EFAULT);
1734}
1735
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001736static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001737{
1738 switch (qp_state) {
1739 case QED_ROCE_QP_STATE_RESET:
1740 return IB_QPS_RESET;
1741 case QED_ROCE_QP_STATE_INIT:
1742 return IB_QPS_INIT;
1743 case QED_ROCE_QP_STATE_RTR:
1744 return IB_QPS_RTR;
1745 case QED_ROCE_QP_STATE_RTS:
1746 return IB_QPS_RTS;
1747 case QED_ROCE_QP_STATE_SQD:
1748 return IB_QPS_SQD;
1749 case QED_ROCE_QP_STATE_ERR:
1750 return IB_QPS_ERR;
1751 case QED_ROCE_QP_STATE_SQE:
1752 return IB_QPS_SQE;
1753 }
1754 return IB_QPS_ERR;
1755}
1756
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001757static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1758 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001759{
1760 switch (qp_state) {
1761 case IB_QPS_RESET:
1762 return QED_ROCE_QP_STATE_RESET;
1763 case IB_QPS_INIT:
1764 return QED_ROCE_QP_STATE_INIT;
1765 case IB_QPS_RTR:
1766 return QED_ROCE_QP_STATE_RTR;
1767 case IB_QPS_RTS:
1768 return QED_ROCE_QP_STATE_RTS;
1769 case IB_QPS_SQD:
1770 return QED_ROCE_QP_STATE_SQD;
1771 case IB_QPS_ERR:
1772 return QED_ROCE_QP_STATE_ERR;
1773 default:
1774 return QED_ROCE_QP_STATE_ERR;
1775 }
1776}
1777
1778static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1779{
1780 qed_chain_reset(&qph->pbl);
1781 qph->prod = 0;
1782 qph->cons = 0;
1783 qph->wqe_cons = 0;
1784 qph->db_data.data.value = cpu_to_le16(0);
1785}
1786
1787static int qedr_update_qp_state(struct qedr_dev *dev,
1788 struct qedr_qp *qp,
1789 enum qed_roce_qp_state new_state)
1790{
1791 int status = 0;
1792
1793 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001794 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001795
1796 switch (qp->state) {
1797 case QED_ROCE_QP_STATE_RESET:
1798 switch (new_state) {
1799 case QED_ROCE_QP_STATE_INIT:
1800 qp->prev_wqe_size = 0;
1801 qedr_reset_qp_hwq_info(&qp->sq);
1802 qedr_reset_qp_hwq_info(&qp->rq);
1803 break;
1804 default:
1805 status = -EINVAL;
1806 break;
1807		}
1808 break;
1809 case QED_ROCE_QP_STATE_INIT:
1810 switch (new_state) {
1811 case QED_ROCE_QP_STATE_RTR:
1812 /* Update doorbell (in case post_recv was
1813 * done before move to RTR)
1814 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001815
1816 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1817 wmb();
Sinan Kaya561e5d482018-03-13 23:20:24 -04001818 writel_relaxed(qp->rq.db_data.raw, qp->rq.db);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001819 /* Make sure write takes effect */
1820 mmiowb();
1821 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001822 break;
1823 case QED_ROCE_QP_STATE_ERR:
1824 break;
1825 default:
1826 /* Invalid state change. */
1827 status = -EINVAL;
1828 break;
1829		}
1830 break;
1831 case QED_ROCE_QP_STATE_RTR:
1832 /* RTR->XXX */
1833 switch (new_state) {
1834 case QED_ROCE_QP_STATE_RTS:
1835 break;
1836 case QED_ROCE_QP_STATE_ERR:
1837 break;
1838 default:
1839 /* Invalid state change. */
1840 status = -EINVAL;
1841 break;
1842		}
1843 break;
1844 case QED_ROCE_QP_STATE_RTS:
1845 /* RTS->XXX */
1846 switch (new_state) {
1847 case QED_ROCE_QP_STATE_SQD:
1848 break;
1849 case QED_ROCE_QP_STATE_ERR:
1850 break;
1851 default:
1852 /* Invalid state change. */
1853 status = -EINVAL;
1854 break;
1855		}
1856 break;
1857 case QED_ROCE_QP_STATE_SQD:
1858 /* SQD->XXX */
1859 switch (new_state) {
1860 case QED_ROCE_QP_STATE_RTS:
1861 case QED_ROCE_QP_STATE_ERR:
1862 break;
1863 default:
1864 /* Invalid state change. */
1865 status = -EINVAL;
1866 break;
1867		}
1868 break;
1869 case QED_ROCE_QP_STATE_ERR:
1870 /* ERR->XXX */
1871 switch (new_state) {
1872 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001873 if ((qp->rq.prod != qp->rq.cons) ||
1874 (qp->sq.prod != qp->sq.cons)) {
1875 DP_NOTICE(dev,
1876 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1877 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1878 qp->sq.cons);
1879 status = -EINVAL;
1880 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001881 break;
1882 default:
1883 status = -EINVAL;
1884 break;
1885		}
1886 break;
1887 default:
1888 status = -EINVAL;
1889 break;
1890	}
1891
1892 return status;
1893}
1894
1895int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1896 int attr_mask, struct ib_udata *udata)
1897{
1898 struct qedr_qp *qp = get_qedr_qp(ibqp);
1899 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1900 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001901 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001902 enum ib_qp_state old_qp_state, new_qp_state;
1903 int rc = 0;
1904
1905 DP_DEBUG(dev, QEDR_MSG_QP,
1906 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1907 attr->qp_state);
1908
1909 old_qp_state = qedr_get_ibqp_state(qp->state);
1910 if (attr_mask & IB_QP_STATE)
1911 new_qp_state = attr->qp_state;
1912 else
1913 new_qp_state = old_qp_state;
1914
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001915 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1916 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1917 ibqp->qp_type, attr_mask,
1918 IB_LINK_LAYER_ETHERNET)) {
1919 DP_ERR(dev,
1920 "modify qp: invalid attribute mask=0x%x specified for\n"
1921 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1922 attr_mask, qp->qp_id, ibqp->qp_type,
1923 old_qp_state, new_qp_state);
1924 rc = -EINVAL;
1925 goto err;
1926 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001927 }
1928
1929 /* Translate the masks... */
1930 if (attr_mask & IB_QP_STATE) {
1931 SET_FIELD(qp_params.modify_flags,
1932 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1933 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1934 }
1935
1936 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1937 qp_params.sqd_async = true;
1938
1939 if (attr_mask & IB_QP_PKEY_INDEX) {
1940 SET_FIELD(qp_params.modify_flags,
1941 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1942 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1943 rc = -EINVAL;
1944 goto err;
1945 }
1946
1947 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1948 }
1949
1950 if (attr_mask & IB_QP_QKEY)
1951 qp->qkey = attr->qkey;
1952
1953 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1954 SET_FIELD(qp_params.modify_flags,
1955 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1956 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1957 IB_ACCESS_REMOTE_READ;
1958 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1959 IB_ACCESS_REMOTE_WRITE;
1960 qp_params.incoming_atomic_en = attr->qp_access_flags &
1961 IB_ACCESS_REMOTE_ATOMIC;
1962 }
1963
1964 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1965 if (attr_mask & IB_QP_PATH_MTU) {
1966 if (attr->path_mtu < IB_MTU_256 ||
1967 attr->path_mtu > IB_MTU_4096) {
1968 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1969 rc = -EINVAL;
1970 goto err;
1971 }
1972 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1973 ib_mtu_enum_to_int(iboe_get_mtu
1974 (dev->ndev->mtu)));
1975 }
1976
1977 if (!qp->mtu) {
1978 qp->mtu =
1979 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1980 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1981 }
1982
1983 SET_FIELD(qp_params.modify_flags,
1984 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1985
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001986 qp_params.traffic_class_tos = grh->traffic_class;
1987 qp_params.flow_label = grh->flow_label;
1988 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001989
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001990 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001991
1992 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1993 if (rc) {
1994 DP_ERR(dev,
1995 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001996 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001997 return rc;
1998 }
1999
2000 rc = qedr_get_dmac(dev, &attr->ah_attr,
2001 qp_params.remote_mac_addr);
2002 if (rc)
2003 return rc;
2004
2005 qp_params.use_local_mac = true;
2006 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2007
2008 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2009 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2010 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2011 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2012 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2013 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2014 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2015 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002016
2017 qp_params.mtu = qp->mtu;
2018 qp_params.lb_indication = false;
2019 }
2020
2021 if (!qp_params.mtu) {
2022 /* Stay with current MTU */
2023 if (qp->mtu)
2024 qp_params.mtu = qp->mtu;
2025 else
2026 qp_params.mtu =
2027 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2028 }
2029
2030 if (attr_mask & IB_QP_TIMEOUT) {
2031 SET_FIELD(qp_params.modify_flags,
2032 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2033
2034 qp_params.ack_timeout = attr->timeout;
2035 if (attr->timeout) {
2036 u32 temp;
2037
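			/* Illustration: attr->timeout is the IB exponent
			 * (4.096 usec * 2^timeout), so a value of 14 works
			 * out to 4096 * 16384 / 10^6 ~= 67 msec below.
			 */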
2038 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2039 /* FW requires [msec] */
2040 qp_params.ack_timeout = temp;
2041 } else {
2042 /* Infinite */
2043 qp_params.ack_timeout = 0;
2044 }
2045 }
2046 if (attr_mask & IB_QP_RETRY_CNT) {
2047 SET_FIELD(qp_params.modify_flags,
2048 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2049 qp_params.retry_cnt = attr->retry_cnt;
2050 }
2051
2052 if (attr_mask & IB_QP_RNR_RETRY) {
2053 SET_FIELD(qp_params.modify_flags,
2054 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2055 qp_params.rnr_retry_cnt = attr->rnr_retry;
2056 }
2057
2058 if (attr_mask & IB_QP_RQ_PSN) {
2059 SET_FIELD(qp_params.modify_flags,
2060 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2061 qp_params.rq_psn = attr->rq_psn;
2062 qp->rq_psn = attr->rq_psn;
2063 }
2064
2065 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2066 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2067 rc = -EINVAL;
2068 DP_ERR(dev,
2069 "unsupported max_rd_atomic=%d, supported=%d\n",
2070 attr->max_rd_atomic,
2071 dev->attr.max_qp_req_rd_atomic_resc);
2072 goto err;
2073 }
2074
2075 SET_FIELD(qp_params.modify_flags,
2076 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2077 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2078 }
2079
2080 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2081 SET_FIELD(qp_params.modify_flags,
2082 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2083 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2084 }
2085
2086 if (attr_mask & IB_QP_SQ_PSN) {
2087 SET_FIELD(qp_params.modify_flags,
2088 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2089 qp_params.sq_psn = attr->sq_psn;
2090 qp->sq_psn = attr->sq_psn;
2091 }
2092
2093 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2094 if (attr->max_dest_rd_atomic >
2095 dev->attr.max_qp_resp_rd_atomic_resc) {
2096 DP_ERR(dev,
2097 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2098 attr->max_dest_rd_atomic,
2099 dev->attr.max_qp_resp_rd_atomic_resc);
2100
2101 rc = -EINVAL;
2102 goto err;
2103 }
2104
2105 SET_FIELD(qp_params.modify_flags,
2106 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2107 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2108 }
2109
2110 if (attr_mask & IB_QP_DEST_QPN) {
2111 SET_FIELD(qp_params.modify_flags,
2112 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2113
2114 qp_params.dest_qp = attr->dest_qp_num;
2115 qp->dest_qp_num = attr->dest_qp_num;
2116 }
2117
2118 if (qp->qp_type != IB_QPT_GSI)
2119 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2120 qp->qed_qp, &qp_params);
2121
2122 if (attr_mask & IB_QP_STATE) {
2123 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002124 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002125 qp->state = qp_params.new_state;
2126 }
2127
2128err:
2129 return rc;
2130}
2131
2132static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2133{
2134 int ib_qp_acc_flags = 0;
2135
2136 if (params->incoming_rdma_write_en)
2137 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2138 if (params->incoming_rdma_read_en)
2139 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2140 if (params->incoming_atomic_en)
2141 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2142 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2143 return ib_qp_acc_flags;
2144}
2145
2146int qedr_query_qp(struct ib_qp *ibqp,
2147 struct ib_qp_attr *qp_attr,
2148 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2149{
2150 struct qed_rdma_query_qp_out_params params;
2151 struct qedr_qp *qp = get_qedr_qp(ibqp);
2152 struct qedr_dev *dev = qp->dev;
2153 int rc = 0;
2154
2155 memset(&params, 0, sizeof(params));
2156
2157 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2158 if (rc)
2159 goto err;
2160
2161 memset(qp_attr, 0, sizeof(*qp_attr));
2162 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2163
2164 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2165 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002166 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002167 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2168 qp_attr->rq_psn = params.rq_psn;
2169 qp_attr->sq_psn = params.sq_psn;
2170 qp_attr->dest_qp_num = params.dest_qp;
2171
2172 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2173
2174 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2175 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2176 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2177 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002178 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002179 qp_init_attr->cap = qp_attr->cap;
2180
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002181 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002182 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2183 params.flow_label, qp->sgid_idx,
2184 params.hop_limit_ttl, params.traffic_class_tos);
2185 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2186 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2187 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002188 qp_attr->timeout = params.timeout;
2189 qp_attr->rnr_retry = params.rnr_retry;
2190 qp_attr->retry_cnt = params.retry_cnt;
2191 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2192 qp_attr->pkey_index = params.pkey_index;
2193 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002194 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2195 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002196 qp_attr->alt_pkey_index = 0;
2197 qp_attr->alt_port_num = 0;
2198 qp_attr->alt_timeout = 0;
2199 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2200
2201 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2202 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2203 qp_attr->max_rd_atomic = params.max_rd_atomic;
2204 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2205
2206 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2207 qp_attr->cap.max_inline_data);
2208
2209err:
2210 return rc;
2211}
2212
Bart Van Assche00899852017-10-11 10:49:17 -07002213static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
Amrani, Ramdf158562016-12-22 14:52:24 +02002214{
2215 int rc = 0;
2216
2217 if (qp->qp_type != IB_QPT_GSI) {
2218 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2219 if (rc)
2220 return rc;
2221 }
2222
2223 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2224 qedr_cleanup_user(dev, qp);
2225 else
2226 qedr_cleanup_kernel(dev, qp);
2227
2228 return 0;
2229}
2230
Ram Amranicecbcdd2016-10-10 13:15:34 +03002231int qedr_destroy_qp(struct ib_qp *ibqp)
2232{
2233 struct qedr_qp *qp = get_qedr_qp(ibqp);
2234 struct qedr_dev *dev = qp->dev;
2235 struct ib_qp_attr attr;
2236 int attr_mask = 0;
2237 int rc = 0;
2238
2239 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2240 qp, qp->qp_type);
2241
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002242 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2243 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2244 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2245 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002246
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002247 attr.qp_state = IB_QPS_ERR;
2248 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002249
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002250 /* Change the QP state to ERROR */
2251 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2252 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002253 } else {
2254 /* Wait for the connect/accept to complete */
2255 if (qp->ep) {
2256 int wait_count = 1;
2257
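			/* The poll below sleeps 100 msec per iteration and
			 * gives up after 200 iterations, i.e. roughly 20
			 * seconds.
			 */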
2258 while (qp->ep->during_connect) {
2259 DP_DEBUG(dev, QEDR_MSG_QP,
2260 "Still in during connect/accept\n");
2261
2262 msleep(100);
2263 if (wait_count++ > 200) {
2264 DP_NOTICE(dev,
2265 "during connect timeout\n");
2266 break;
2267 }
2268 }
2269 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002270 }
2271
Amrani, Ramdf158562016-12-22 14:52:24 +02002272 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002273 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002274
Amrani, Ramdf158562016-12-22 14:52:24 +02002275 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002276
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002277 if (atomic_dec_and_test(&qp->refcnt)) {
2278 qedr_idr_remove(dev, qp->qp_id);
2279 kfree(qp);
2280 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002281 return rc;
2282}
Ram Amranie0290cc2016-10-10 13:15:35 +03002283
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002284struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002285 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002286{
2287 struct qedr_ah *ah;
2288
2289 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2290 if (!ah)
2291 return ERR_PTR(-ENOMEM);
2292
2293 ah->attr = *attr;
2294
2295 return &ah->ibah;
2296}
2297
2298int qedr_destroy_ah(struct ib_ah *ibah)
2299{
2300 struct qedr_ah *ah = get_qedr_ah(ibah);
2301
2302 kfree(ah);
2303 return 0;
2304}
2305
Ram Amranie0290cc2016-10-10 13:15:35 +03002306static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2307{
2308 struct qedr_pbl *pbl, *tmp;
2309
2310 if (info->pbl_table)
2311 list_add_tail(&info->pbl_table->list_entry,
2312 &info->free_pbl_list);
2313
2314 if (!list_empty(&info->inuse_pbl_list))
2315 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2316
2317 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2318 list_del(&pbl->list_entry);
2319 qedr_free_pbl(dev, &info->pbl_info, pbl);
2320 }
2321}
2322
2323static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2324 size_t page_list_len, bool two_layered)
2325{
2326 struct qedr_pbl *tmp;
2327 int rc;
2328
2329 INIT_LIST_HEAD(&info->free_pbl_list);
2330 INIT_LIST_HEAD(&info->inuse_pbl_list);
2331
2332 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2333 page_list_len, two_layered);
2334 if (rc)
2335 goto done;
2336
2337 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002338 if (IS_ERR(info->pbl_table)) {
2339 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002340 goto done;
2341 }
2342
2343 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2344 &info->pbl_table->pa);
2345
2346	/* in the usual case we use 2 PBLs, so we add one to the free
2347	 * list and allocate another one
2348 */
2349 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002350 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002351 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2352 goto done;
2353 }
2354
2355 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2356
2357 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2358
2359done:
2360 if (rc)
2361 free_mr_info(dev, info);
2362
2363 return rc;
2364}
2365
2366struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2367 u64 usr_addr, int acc, struct ib_udata *udata)
2368{
2369 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2370 struct qedr_mr *mr;
2371 struct qedr_pd *pd;
2372 int rc = -ENOMEM;
2373
2374 pd = get_qedr_pd(ibpd);
2375 DP_DEBUG(dev, QEDR_MSG_MR,
2376 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2377 pd->pd_id, start, len, usr_addr, acc);
2378
2379 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2380 return ERR_PTR(-EINVAL);
2381
2382 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2383 if (!mr)
2384 return ERR_PTR(rc);
2385
2386 mr->type = QEDR_MR_USER;
2387
2388 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2389 if (IS_ERR(mr->umem)) {
2390 rc = -EFAULT;
2391 goto err0;
2392 }
2393
2394 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2395 if (rc)
2396 goto err1;
2397
2398 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002399 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002400
2401 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2402 if (rc) {
2403 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2404 goto err1;
2405 }
2406
2407 /* Index only, 18 bit long, lkey = itid << 8 | key */
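	/* e.g. itid 0x2A with key 0 yields lkey 0x2A00: the low byte is
	 * reserved for the 8-bit key, the itid sits in the bits above it.
	 */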
2408 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2409 mr->hw_mr.key = 0;
2410 mr->hw_mr.pd = pd->pd_id;
2411 mr->hw_mr.local_read = 1;
2412 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2413 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2414 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2415 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2416 mr->hw_mr.mw_bind = false;
2417 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2418 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2419 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002420 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002421 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2422 mr->hw_mr.length = len;
2423 mr->hw_mr.vaddr = usr_addr;
2424 mr->hw_mr.zbva = false;
2425 mr->hw_mr.phy_mr = false;
2426 mr->hw_mr.dma_mr = false;
2427
2428 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2429 if (rc) {
2430 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2431 goto err2;
2432 }
2433
2434 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2435 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2436 mr->hw_mr.remote_atomic)
2437 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2438
2439 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2440 mr->ibmr.lkey);
2441 return &mr->ibmr;
2442
2443err2:
2444 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2445err1:
2446 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2447err0:
2448 kfree(mr);
2449 return ERR_PTR(rc);
2450}
2451
2452int qedr_dereg_mr(struct ib_mr *ib_mr)
2453{
2454 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2455 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2456 int rc = 0;
2457
2458 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2459 if (rc)
2460 return rc;
2461
2462 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2463
2464 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2465 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2466
2467 /* it could be user registered memory. */
2468 if (mr->umem)
2469 ib_umem_release(mr->umem);
2470
2471 kfree(mr);
2472
2473 return rc;
2474}
2475
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002476static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2477 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002478{
2479 struct qedr_pd *pd = get_qedr_pd(ibpd);
2480 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2481 struct qedr_mr *mr;
2482 int rc = -ENOMEM;
2483
2484 DP_DEBUG(dev, QEDR_MSG_MR,
2485 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2486 max_page_list_len);
2487
2488 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2489 if (!mr)
2490 return ERR_PTR(rc);
2491
2492 mr->dev = dev;
2493 mr->type = QEDR_MR_FRMR;
2494
2495 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2496 if (rc)
2497 goto err0;
2498
2499 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2500 if (rc) {
2501 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2502 goto err0;
2503 }
2504
2505 /* Index only, 18 bit long, lkey = itid << 8 | key */
2506 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2507 mr->hw_mr.key = 0;
2508 mr->hw_mr.pd = pd->pd_id;
2509 mr->hw_mr.local_read = 1;
2510 mr->hw_mr.local_write = 0;
2511 mr->hw_mr.remote_read = 0;
2512 mr->hw_mr.remote_write = 0;
2513 mr->hw_mr.remote_atomic = 0;
2514 mr->hw_mr.mw_bind = false;
2515 mr->hw_mr.pbl_ptr = 0;
2516 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2517 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2518 mr->hw_mr.fbo = 0;
2519 mr->hw_mr.length = 0;
2520 mr->hw_mr.vaddr = 0;
2521 mr->hw_mr.zbva = false;
2522 mr->hw_mr.phy_mr = true;
2523 mr->hw_mr.dma_mr = false;
2524
2525 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2526 if (rc) {
2527 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2528 goto err1;
2529 }
2530
2531 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2532 mr->ibmr.rkey = mr->ibmr.lkey;
2533
2534 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2535 return mr;
2536
2537err1:
2538 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2539err0:
2540 kfree(mr);
2541 return ERR_PTR(rc);
2542}
2543
2544struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2545 enum ib_mr_type mr_type, u32 max_num_sg)
2546{
Ram Amranie0290cc2016-10-10 13:15:35 +03002547 struct qedr_mr *mr;
2548
2549 if (mr_type != IB_MR_TYPE_MEM_REG)
2550 return ERR_PTR(-EINVAL);
2551
2552 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2553
2554 if (IS_ERR(mr))
2555 return ERR_PTR(-EINVAL);
2556
Ram Amranie0290cc2016-10-10 13:15:35 +03002557 return &mr->ibmr;
2558}
2559
2560static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2561{
2562 struct qedr_mr *mr = get_qedr_mr(ibmr);
2563 struct qedr_pbl *pbl_table;
2564 struct regpair *pbe;
2565 u32 pbes_in_page;
2566
2567 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2568		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2569 return -ENOMEM;
2570 }
2571
2572 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2573 mr->npages, addr);
2574
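	/* Locate the PBE: each PBL page holds pbl_size / sizeof(u64) entries,
	 * so npages / pbes_in_page selects the PBL page and the remainder is
	 * the entry offset within that page.
	 */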
2575 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2576 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2577 pbe = (struct regpair *)pbl_table->va;
2578 pbe += mr->npages % pbes_in_page;
2579 pbe->lo = cpu_to_le32((u32)addr);
2580 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2581
2582 mr->npages++;
2583
2584 return 0;
2585}
2586
2587static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2588{
2589 int work = info->completed - info->completed_handled - 1;
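	/* 'work' is one less than the number of unhandled completions, so the
	 * PBL of the most recent completion is always held back for the next
	 * round (presumably because it may still be referenced).
	 */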
2590
2591 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2592 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2593 struct qedr_pbl *pbl;
2594
2595		/* Free all the page lists that can be freed (all the ones that
2596		 * were invalidated), under the assumption that if an FMR completed
2597		 * successfully, then any invalidate operation posted before it has
2598		 * also completed
2599 */
2600 pbl = list_first_entry(&info->inuse_pbl_list,
2601 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002602 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002603 info->completed_handled++;
2604 }
2605}
2606
2607int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2608 int sg_nents, unsigned int *sg_offset)
2609{
2610 struct qedr_mr *mr = get_qedr_mr(ibmr);
2611
2612 mr->npages = 0;
2613
2614 handle_completed_mrs(mr->dev, &mr->info);
2615 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2616}
2617
2618struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2619{
2620 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2621 struct qedr_pd *pd = get_qedr_pd(ibpd);
2622 struct qedr_mr *mr;
2623 int rc;
2624
2625 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2626 if (!mr)
2627 return ERR_PTR(-ENOMEM);
2628
2629 mr->type = QEDR_MR_DMA;
2630
2631 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2632 if (rc) {
2633 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2634 goto err1;
2635 }
2636
2637 /* index only, 18 bit long, lkey = itid << 8 | key */
2638 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2639 mr->hw_mr.pd = pd->pd_id;
2640 mr->hw_mr.local_read = 1;
2641 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2642 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2643 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2644 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2645 mr->hw_mr.dma_mr = true;
2646
2647 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2648 if (rc) {
2649 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2650 goto err2;
2651 }
2652
2653 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2654 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2655 mr->hw_mr.remote_atomic)
2656 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2657
2658 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2659 return &mr->ibmr;
2660
2661err2:
2662 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2663err1:
2664 kfree(mr);
2665 return ERR_PTR(rc);
2666}
Ram Amraniafa0e132016-10-10 13:15:36 +03002667
2668static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2669{
2670 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2671}
2672
2673static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2674{
2675 int i, len = 0;
2676
2677 for (i = 0; i < num_sge; i++)
2678 len += sg_list[i].length;
2679
2680 return len;
2681}
2682
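/* Byte-swap every 64-bit word of a WQE segment. The inline-data path below
 * calls this on each fully built segment, and once more for a trailing
 * partially filled segment.
 */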
2683static void swap_wqe_data64(u64 *p)
2684{
2685 int i;
2686
2687 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2688 *p = cpu_to_be64(cpu_to_le64(*p));
2689}
2690
2691static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2692 struct qedr_qp *qp, u8 *wqe_size,
2693 struct ib_send_wr *wr,
2694 struct ib_send_wr **bad_wr, u8 *bits,
2695 u8 bit)
2696{
2697 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2698 char *seg_prt, *wqe;
2699 int i, seg_siz;
2700
2701 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2702 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2703 *bad_wr = wr;
2704 return 0;
2705 }
2706
2707 if (!data_size)
2708 return data_size;
2709
2710 *bits |= bit;
2711
2712 seg_prt = NULL;
2713 wqe = NULL;
2714 seg_siz = 0;
2715
2716 /* Copy data inline */
2717 for (i = 0; i < wr->num_sge; i++) {
2718 u32 len = wr->sg_list[i].length;
2719 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2720
2721 while (len > 0) {
2722 u32 cur;
2723
2724 /* New segment required */
2725 if (!seg_siz) {
2726 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2727 seg_prt = wqe;
2728 seg_siz = sizeof(struct rdma_sq_common_wqe);
2729 (*wqe_size)++;
2730 }
2731
2732 /* Calculate currently allowed length */
2733 cur = min_t(u32, len, seg_siz);
2734 memcpy(seg_prt, src, cur);
2735
2736 /* Update segment variables */
2737 seg_prt += cur;
2738 seg_siz -= cur;
2739
2740 /* Update sge variables */
2741 src += cur;
2742 len -= cur;
2743
2744 /* Swap fully-completed segments */
2745 if (!seg_siz)
2746 swap_wqe_data64((u64 *)wqe);
2747 }
2748 }
2749
2750	/* swap the last, partially filled segment (if any) */
2751 if (seg_siz)
2752 swap_wqe_data64((u64 *)wqe);
2753
2754 return data_size;
2755}
2756
2757#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2758 do { \
2759 DMA_REGPAIR_LE(sge->addr, vaddr); \
2760 (sge)->length = cpu_to_le32(vlength); \
2761 (sge)->flags = cpu_to_le32(vflags); \
2762 } while (0)
2763
2764#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2765 do { \
2766 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2767 (hdr)->num_sges = num_sge; \
2768 } while (0)
2769
2770#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2771 do { \
2772 DMA_REGPAIR_LE(sge->addr, vaddr); \
2773 (sge)->length = cpu_to_le32(vlength); \
2774 (sge)->l_key = cpu_to_le32(vlkey); \
2775 } while (0)
2776
2777static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2778 struct ib_send_wr *wr)
2779{
2780 u32 data_size = 0;
2781 int i;
2782
2783 for (i = 0; i < wr->num_sge; i++) {
2784 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2785
2786 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2787 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2788 sge->length = cpu_to_le32(wr->sg_list[i].length);
2789 data_size += wr->sg_list[i].length;
2790 }
2791
2792 if (wqe_size)
2793 *wqe_size += wr->num_sge;
2794
2795 return data_size;
2796}
2797
2798static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2799 struct qedr_qp *qp,
2800 struct rdma_sq_rdma_wqe_1st *rwqe,
2801 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2802 struct ib_send_wr *wr,
2803 struct ib_send_wr **bad_wr)
2804{
2805 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2806 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2807
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002808 if (wr->send_flags & IB_SEND_INLINE &&
2809 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2810 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002811 u8 flags = 0;
2812
2813 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2814 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2815 bad_wr, &rwqe->flags, flags);
2816 }
2817
2818 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2819}
2820
2821static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2822 struct qedr_qp *qp,
2823 struct rdma_sq_send_wqe_1st *swqe,
2824 struct rdma_sq_send_wqe_2st *swqe2,
2825 struct ib_send_wr *wr,
2826 struct ib_send_wr **bad_wr)
2827{
2828 memset(swqe2, 0, sizeof(*swqe2));
2829 if (wr->send_flags & IB_SEND_INLINE) {
2830 u8 flags = 0;
2831
2832 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2833 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2834 bad_wr, &swqe->flags, flags);
2835 }
2836
2837 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2838}
2839
2840static int qedr_prepare_reg(struct qedr_qp *qp,
2841 struct rdma_sq_fmr_wqe_1st *fwqe1,
2842 struct ib_reg_wr *wr)
2843{
2844 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2845 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2846
2847 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2848 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2849 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2850 fwqe1->l_key = wr->key;
2851
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002852 fwqe2->access_ctrl = 0;
2853
Ram Amraniafa0e132016-10-10 13:15:36 +03002854 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2855 !!(wr->access & IB_ACCESS_REMOTE_READ));
2856 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2857 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2858 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2859 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2860 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2861 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2862 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2863 fwqe2->fmr_ctrl = 0;
2864
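	/* The page size appears to be encoded as a log relative to 4 KB, so
	 * ilog2(page_size) - 12 is 0 for 4 KB pages and 9 for 2 MB pages.
	 */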
2865 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2866 ilog2(mr->ibmr.page_size) - 12);
2867
2868 fwqe2->length_hi = 0;
2869 fwqe2->length_lo = mr->ibmr.length;
2870 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2871 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2872
2873 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2874
2875 return 0;
2876}
2877
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002878static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002879{
2880 switch (opcode) {
2881 case IB_WR_RDMA_WRITE:
2882 case IB_WR_RDMA_WRITE_WITH_IMM:
2883 return IB_WC_RDMA_WRITE;
2884 case IB_WR_SEND_WITH_IMM:
2885 case IB_WR_SEND:
2886 case IB_WR_SEND_WITH_INV:
2887 return IB_WC_SEND;
2888 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002889 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002890 return IB_WC_RDMA_READ;
2891 case IB_WR_ATOMIC_CMP_AND_SWP:
2892 return IB_WC_COMP_SWAP;
2893 case IB_WR_ATOMIC_FETCH_AND_ADD:
2894 return IB_WC_FETCH_ADD;
2895 case IB_WR_REG_MR:
2896 return IB_WC_REG_MR;
2897 case IB_WR_LOCAL_INV:
2898 return IB_WC_LOCAL_INV;
2899 default:
2900 return IB_WC_SEND;
2901 }
2902}
2903
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002904static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002905{
2906 int wq_is_full, err_wr, pbl_is_full;
2907 struct qedr_dev *dev = qp->dev;
2908
2909 /* prevent SQ overflow and/or processing of a bad WR */
2910 err_wr = wr->num_sge > qp->sq.max_sges;
2911 wq_is_full = qedr_wq_is_full(&qp->sq);
2912 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2913 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2914 if (wq_is_full || err_wr || pbl_is_full) {
2915 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2916 DP_ERR(dev,
2917 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2918 qp);
2919 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2920 }
2921
2922 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2923 DP_ERR(dev,
2924 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2925 qp);
2926 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2927 }
2928
2929 if (pbl_is_full &&
2930 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2931 DP_ERR(dev,
2932 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2933 qp);
2934 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2935 }
2936 return false;
2937 }
2938 return true;
2939}
2940
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002941static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002942 struct ib_send_wr **bad_wr)
2943{
2944 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2945 struct qedr_qp *qp = get_qedr_qp(ibqp);
2946 struct rdma_sq_atomic_wqe_1st *awqe1;
2947 struct rdma_sq_atomic_wqe_2nd *awqe2;
2948 struct rdma_sq_atomic_wqe_3rd *awqe3;
2949 struct rdma_sq_send_wqe_2st *swqe2;
2950 struct rdma_sq_local_inv_wqe *iwqe;
2951 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2952 struct rdma_sq_send_wqe_1st *swqe;
2953 struct rdma_sq_rdma_wqe_1st *rwqe;
2954 struct rdma_sq_fmr_wqe_1st *fwqe1;
2955 struct rdma_sq_common_wqe *wqe;
2956 u32 length;
2957 int rc = 0;
2958 bool comp;
2959
2960 if (!qedr_can_post_send(qp, wr)) {
2961 *bad_wr = wr;
2962 return -ENOMEM;
2963 }
2964
2965 wqe = qed_chain_produce(&qp->sq.pbl);
2966 qp->wqe_wr_id[qp->sq.prod].signaled =
2967 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2968
2969 wqe->flags = 0;
2970 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2971 !!(wr->send_flags & IB_SEND_SOLICITED));
2972 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2973 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2974 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2975 !!(wr->send_flags & IB_SEND_FENCE));
2976 wqe->prev_wqe_size = qp->prev_wqe_size;
2977
2978 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2979
2980 switch (wr->opcode) {
2981 case IB_WR_SEND_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02002982 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2983 rc = -EINVAL;
2984 *bad_wr = wr;
2985 break;
2986 }
Ram Amraniafa0e132016-10-10 13:15:36 +03002987 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2988 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2989 swqe->wqe_size = 2;
2990 swqe2 = qed_chain_produce(&qp->sq.pbl);
2991
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07002992 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
Ram Amraniafa0e132016-10-10 13:15:36 +03002993 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2994 wr, bad_wr);
2995 swqe->length = cpu_to_le32(length);
2996 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2997 qp->prev_wqe_size = swqe->wqe_size;
2998 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2999 break;
3000 case IB_WR_SEND:
3001 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3002 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3003
3004 swqe->wqe_size = 2;
3005 swqe2 = qed_chain_produce(&qp->sq.pbl);
3006 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3007 wr, bad_wr);
3008 swqe->length = cpu_to_le32(length);
3009 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3010 qp->prev_wqe_size = swqe->wqe_size;
3011 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3012 break;
3013 case IB_WR_SEND_WITH_INV:
3014 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3015 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3016 swqe2 = qed_chain_produce(&qp->sq.pbl);
3017 swqe->wqe_size = 2;
3018 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3019 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3020 wr, bad_wr);
3021 swqe->length = cpu_to_le32(length);
3022 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3023 qp->prev_wqe_size = swqe->wqe_size;
3024 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3025 break;
3026
3027 case IB_WR_RDMA_WRITE_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02003028 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3029 rc = -EINVAL;
3030 *bad_wr = wr;
3031 break;
3032 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003033 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3034 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3035
3036 rwqe->wqe_size = 2;
3037		rwqe->imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3038 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3039 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3040 wr, bad_wr);
3041 rwqe->length = cpu_to_le32(length);
3042 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3043 qp->prev_wqe_size = rwqe->wqe_size;
3044 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3045 break;
3046 case IB_WR_RDMA_WRITE:
3047 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3048 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3049
3050 rwqe->wqe_size = 2;
3051 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3052 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3053 wr, bad_wr);
3054 rwqe->length = cpu_to_le32(length);
3055 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3056 qp->prev_wqe_size = rwqe->wqe_size;
3057 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3058 break;
3059 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003060 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
Bart Van Assche1b8a708b2017-10-11 10:49:19 -07003061		/* fallthrough -- the rest is identical to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003062
3063 case IB_WR_RDMA_READ:
3064 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3065 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3066
3067 rwqe->wqe_size = 2;
3068 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3069 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3070 wr, bad_wr);
3071 rwqe->length = cpu_to_le32(length);
3072 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3073 qp->prev_wqe_size = rwqe->wqe_size;
3074 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3075 break;
3076
3077 case IB_WR_ATOMIC_CMP_AND_SWP:
3078 case IB_WR_ATOMIC_FETCH_AND_ADD:
3079 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3080 awqe1->wqe_size = 4;
3081
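		/* An atomic WR spans several chain elements: this header, a
		 * 2nd element carrying the remote VA and rkey, a 3rd carrying
		 * the swap/compare operands, and then the local-buffer SGE.
		 */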
3082 awqe2 = qed_chain_produce(&qp->sq.pbl);
3083 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3084 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3085
3086 awqe3 = qed_chain_produce(&qp->sq.pbl);
3087
3088 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3089 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3090 DMA_REGPAIR_LE(awqe3->swap_data,
3091 atomic_wr(wr)->compare_add);
3092 } else {
3093 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3094 DMA_REGPAIR_LE(awqe3->swap_data,
3095 atomic_wr(wr)->swap);
3096 DMA_REGPAIR_LE(awqe3->cmp_data,
3097 atomic_wr(wr)->compare_add);
3098 }
3099
3100 qedr_prepare_sq_sges(qp, NULL, wr);
3101
3102 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3103 qp->prev_wqe_size = awqe1->wqe_size;
3104 break;
3105
3106 case IB_WR_LOCAL_INV:
3107 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3108 iwqe->wqe_size = 1;
3109
3110 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3111 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3112 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3113 qp->prev_wqe_size = iwqe->wqe_size;
3114 break;
3115 case IB_WR_REG_MR:
3116 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3117 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3118 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3119 fwqe1->wqe_size = 2;
3120
3121 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3122 if (rc) {
3123 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3124 *bad_wr = wr;
3125 break;
3126 }
3127
3128 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3129 qp->prev_wqe_size = fwqe1->wqe_size;
3130 break;
3131 default:
3132 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3133 rc = -EINVAL;
3134 *bad_wr = wr;
3135 break;
3136 }
3137
3138 if (*bad_wr) {
3139 u16 value;
3140
3141 /* Restore prod to its position before
3142 * this WR was processed
3143 */
3144 value = le16_to_cpu(qp->sq.db_data.data.value);
3145 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3146
3147 /* Restore prev_wqe_size */
3148 qp->prev_wqe_size = wqe->prev_wqe_size;
3149 rc = -EINVAL;
3150 DP_ERR(dev, "POST SEND FAILED\n");
3151 }
3152
3153 return rc;
3154}
3155
3156int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3157 struct ib_send_wr **bad_wr)
3158{
3159 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3160 struct qedr_qp *qp = get_qedr_qp(ibqp);
3161 unsigned long flags;
3162 int rc = 0;
3163
3164 *bad_wr = NULL;
3165
Ram Amrani04886772016-10-10 13:15:38 +03003166 if (qp->qp_type == IB_QPT_GSI)
3167 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3168
Ram Amraniafa0e132016-10-10 13:15:36 +03003169 spin_lock_irqsave(&qp->q_lock, flags);
3170
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003171 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3172 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3173 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3174 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3175 spin_unlock_irqrestore(&qp->q_lock, flags);
3176 *bad_wr = wr;
3177 DP_DEBUG(dev, QEDR_MSG_CQ,
3178 "QP in wrong state! QP icid=0x%x state %d\n",
3179 qp->icid, qp->state);
3180 return -EINVAL;
3181 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003182 }
3183
Ram Amraniafa0e132016-10-10 13:15:36 +03003184 while (wr) {
3185 rc = __qedr_post_send(ibqp, wr, bad_wr);
3186 if (rc)
3187 break;
3188
3189 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3190
3191 qedr_inc_sw_prod(&qp->sq);
3192
3193 qp->sq.db_data.data.value++;
3194
3195 wr = wr->next;
3196 }
3197
3198 /* Trigger doorbell
3199 * If there was a failure in the first WR then it will be triggered in
3200 * vane. However this is not harmful (as long as the producer value is
3201	 * vain. However this is not harmful (as long as the producer value is
3202 * redundant doorbell.
3203 */
3204 wmb();
Sinan Kaya561e5d482018-03-13 23:20:24 -04003205 writel_relaxed(qp->sq.db_data.raw, qp->sq.db);
Ram Amraniafa0e132016-10-10 13:15:36 +03003206
3207 /* Make sure write sticks */
3208 mmiowb();
3209
3210 spin_unlock_irqrestore(&qp->q_lock, flags);
3211
3212 return rc;
3213}
3214
3215int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3216 struct ib_recv_wr **bad_wr)
3217{
3218 struct qedr_qp *qp = get_qedr_qp(ibqp);
3219 struct qedr_dev *dev = qp->dev;
3220 unsigned long flags;
3221 int status = 0;
3222
Ram Amrani04886772016-10-10 13:15:38 +03003223 if (qp->qp_type == IB_QPT_GSI)
3224 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3225
Ram Amraniafa0e132016-10-10 13:15:36 +03003226 spin_lock_irqsave(&qp->q_lock, flags);
3227
Amrani, Ram922d9a42016-12-22 14:40:38 +02003228 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003229 spin_unlock_irqrestore(&qp->q_lock, flags);
3230 *bad_wr = wr;
3231 return -EINVAL;
3232 }
3233
3234 while (wr) {
3235 int i;
3236
3237 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3238 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3239 wr->num_sge > qp->rq.max_sges) {
3240 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3241 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3242 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3243 qp->rq.max_sges);
3244 status = -ENOMEM;
3245 *bad_wr = wr;
3246 break;
3247 }
3248 for (i = 0; i < wr->num_sge; i++) {
3249 u32 flags = 0;
3250 struct rdma_rq_sge *rqe =
3251 qed_chain_produce(&qp->rq.pbl);
3252
3253 /* First one must include the number
3254 * of SGE in the list
3255 */
3256 if (!i)
3257 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3258 wr->num_sge);
3259
3260 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3261 wr->sg_list[i].lkey);
3262
3263 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3264 wr->sg_list[i].length, flags);
3265 }
3266
3267 /* Special case of no sges. FW requires between 1-4 sges...
3268		 * in this case we need to post 1 sge with length zero. This is
3269		 * because an rdma write with immediate consumes an RQ element.
3270 */
3271 if (!wr->num_sge) {
3272 u32 flags = 0;
3273 struct rdma_rq_sge *rqe =
3274 qed_chain_produce(&qp->rq.pbl);
3275
3276 /* First one must include the number
3277 * of SGE in the list
3278 */
3279 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3280 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3281
3282 RQ_SGE_SET(rqe, 0, 0, flags);
3283 i = 1;
3284 }
3285
3286 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3287 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3288
3289 qedr_inc_sw_prod(&qp->rq);
3290
3291 /* Flush all the writes before signalling doorbell */
3292 wmb();
3293
3294 qp->rq.db_data.data.value++;
3295
3296 writel(qp->rq.db_data.raw, qp->rq.db);
3297
3298 /* Make sure write sticks */
3299 mmiowb();
3300
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003301 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3302 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3303 mmiowb(); /* for second doorbell */
3304 }
3305
Ram Amraniafa0e132016-10-10 13:15:36 +03003306 wr = wr->next;
3307 }
3308
3309 spin_unlock_irqrestore(&qp->q_lock, flags);
3310
3311 return status;
3312}
3313
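/* A CQE belongs to software when its toggle bit matches the phase tracked in
 * cq->pbl_toggle; the driver appears to flip that phase each time the
 * consumer wraps around the CQ ring.
 */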
3314static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3315{
3316 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3317
3318 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3319 cq->pbl_toggle;
3320}
3321
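/* The qp_handle hi/lo words in the CQE are the two halves of the qedr_qp
 * pointer stashed into the QP at creation time, so the owning QP can be
 * recovered straight from the completion.
 */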
3322static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3323{
3324 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3325 struct qedr_qp *qp;
3326
3327 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3328 resp_cqe->qp_handle.lo,
3329 u64);
3330 return qp;
3331}
3332
3333static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3334{
3335 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3336
3337 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3338}
3339
3340/* Return latest CQE (needs processing) */
3341static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3342{
3343 return cq->latest_cqe;
3344}
3345
3346/* For FMRs we need to increment the completed counter used by the FMR
3347 * algorithm that determines whether a PBL can be freed or not.
3348 * This must be done whether or not the work request was signaled. For
3349 * that purpose we call this function from the condition that checks if a WR
3350 * should be skipped, to make sure we don't miss it (possibly this FMR
3351 * operation was not signaled)
3352 */
3353static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3354{
3355 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3356 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3357}
3358
3359static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3360 struct qedr_cq *cq, int num_entries,
3361 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3362 int force)
3363{
3364 u16 cnt = 0;
3365
3366 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3367 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3368 qedr_chk_if_fmr(qp);
3369 /* skip WC */
3370 goto next_cqe;
3371 }
3372
3373 /* fill WC */
3374 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003375 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003376 wc->wc_flags = 0;
3377 wc->src_qp = qp->id;
3378 wc->qp = &qp->ibqp;
3379
3380 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3381 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3382
3383 switch (wc->opcode) {
3384 case IB_WC_RDMA_WRITE:
3385 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3386 break;
3387 case IB_WC_COMP_SWAP:
3388 case IB_WC_FETCH_ADD:
3389 wc->byte_len = 8;
3390 break;
3391 case IB_WC_REG_MR:
3392 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3393 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003394 case IB_WC_RDMA_READ:
3395 case IB_WC_SEND:
3396 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3397 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003398 default:
3399 break;
3400 }
3401
3402 num_entries--;
3403 wc++;
3404 cnt++;
3405next_cqe:
3406 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3407 qed_chain_consume(&qp->sq.pbl);
3408 qedr_inc_sw_cons(&qp->sq);
3409 }
3410
3411 return cnt;
3412}
3413
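/* Handle a requester CQE. On success or flush the outstanding SQ WRs are
 * completed accordingly; on any other error the WRs preceding the failing
 * one complete with success and the failure itself is reported with a
 * status translated from the FW error code. Returns the number of WCs
 * generated.
 */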
3414static int qedr_poll_cq_req(struct qedr_dev *dev,
3415 struct qedr_qp *qp, struct qedr_cq *cq,
3416 int num_entries, struct ib_wc *wc,
3417 struct rdma_cqe_requester *req)
3418{
3419 int cnt = 0;
3420
3421 switch (req->status) {
3422 case RDMA_CQE_REQ_STS_OK:
3423 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3424 IB_WC_SUCCESS, 0);
3425 break;
3426 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003427 if (qp->state != QED_ROCE_QP_STATE_ERR)
Kalderon, Michaldc728f72018-01-25 13:23:20 +02003428 DP_DEBUG(dev, QEDR_MSG_CQ,
3429 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3430 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003431 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003432 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003433 break;
3434 default:
3435		/* process all WQEs before the consumer */
3436 qp->state = QED_ROCE_QP_STATE_ERR;
3437 cnt = process_req(dev, qp, cq, num_entries, wc,
3438 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3439 wc += cnt;
3440 /* if we have extra WC fill it with actual error info */
3441 if (cnt < num_entries) {
3442 enum ib_wc_status wc_status;
3443
3444 switch (req->status) {
3445 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3446 DP_ERR(dev,
3447 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3448 cq->icid, qp->icid);
3449 wc_status = IB_WC_BAD_RESP_ERR;
3450 break;
3451 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3452 DP_ERR(dev,
3453 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3454 cq->icid, qp->icid);
3455 wc_status = IB_WC_LOC_LEN_ERR;
3456 break;
3457 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3458 DP_ERR(dev,
3459 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3460 cq->icid, qp->icid);
3461 wc_status = IB_WC_LOC_QP_OP_ERR;
3462 break;
3463 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3464 DP_ERR(dev,
3465 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3466 cq->icid, qp->icid);
3467 wc_status = IB_WC_LOC_PROT_ERR;
3468 break;
3469 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3470 DP_ERR(dev,
3471 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3472 cq->icid, qp->icid);
3473 wc_status = IB_WC_MW_BIND_ERR;
3474 break;
3475 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3476 DP_ERR(dev,
3477 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3478 cq->icid, qp->icid);
3479 wc_status = IB_WC_REM_INV_REQ_ERR;
3480 break;
3481 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3482 DP_ERR(dev,
3483 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3484 cq->icid, qp->icid);
3485 wc_status = IB_WC_REM_ACCESS_ERR;
3486 break;
3487 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3488 DP_ERR(dev,
3489 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3490 cq->icid, qp->icid);
3491 wc_status = IB_WC_REM_OP_ERR;
3492 break;
3493 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3494 DP_ERR(dev,
3495 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3496 cq->icid, qp->icid);
3497 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3498 break;
3499 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3500 DP_ERR(dev,
3501				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3502 cq->icid, qp->icid);
3503 wc_status = IB_WC_RETRY_EXC_ERR;
3504 break;
3505 default:
3506 DP_ERR(dev,
3507 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3508 cq->icid, qp->icid);
3509 wc_status = IB_WC_GENERAL_ERR;
3510 }
3511 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3512 wc_status, 1);
3513 }
3514 }
3515
3516 return cnt;
3517}
3518
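/* Translate a FW responder CQE status into the matching ib_wc_status;
 * unrecognized values map to IB_WC_GENERAL_ERR.
 */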
Amrani, Ramb6acd712017-04-27 13:35:35 +03003519static inline int qedr_cqe_resp_status_to_ib(u8 status)
3520{
3521 switch (status) {
3522 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3523 return IB_WC_LOC_ACCESS_ERR;
3524 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3525 return IB_WC_LOC_LEN_ERR;
3526 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3527 return IB_WC_LOC_QP_OP_ERR;
3528 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3529 return IB_WC_LOC_PROT_ERR;
3530 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3531 return IB_WC_MW_BIND_ERR;
3532 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3533 return IB_WC_REM_INV_RD_REQ_ERR;
3534 case RDMA_CQE_RESP_STS_OK:
3535 return IB_WC_SUCCESS;
3536 default:
3537 return IB_WC_GENERAL_ERR;
3538 }
3539}
3540
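/* Fill a successful responder WC from the CQE: byte length plus, depending
 * on the CQE flags, immediate data or an invalidated rkey. Returns -EINVAL
 * for inconsistent flag combinations.
 */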
3541static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3542 struct ib_wc *wc)
3543{
3544 wc->status = IB_WC_SUCCESS;
3545 wc->byte_len = le32_to_cpu(resp->length);
3546
3547 if (resp->flags & QEDR_RESP_IMM) {
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07003548 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
Amrani, Ramb6acd712017-04-27 13:35:35 +03003549 wc->wc_flags |= IB_WC_WITH_IMM;
3550
3551 if (resp->flags & QEDR_RESP_RDMA)
3552 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3553
3554 if (resp->flags & QEDR_RESP_INV)
3555 return -EINVAL;
3556
3557 } else if (resp->flags & QEDR_RESP_INV) {
3558		wc->ex.invalidate_rkey = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3559 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3560
3561 if (resp->flags & QEDR_RESP_RDMA)
3562 return -EINVAL;
3563
3564 } else if (resp->flags & QEDR_RESP_RDMA) {
3565 return -EINVAL;
3566 }
3567
3568 return 0;
3569}
3570
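/* Translate a single responder CQE into the given WC, filling status,
 * opcode, flags and the caller-supplied wr_id.
 */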
Ram Amraniafa0e132016-10-10 13:15:36 +03003571static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3572 struct qedr_cq *cq, struct ib_wc *wc,
3573 struct rdma_cqe_responder *resp, u64 wr_id)
3574{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003575 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003576 wc->opcode = IB_WC_RECV;
3577 wc->wc_flags = 0;
3578
Amrani, Ramb6acd712017-04-27 13:35:35 +03003579 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3580 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3581 DP_ERR(dev,
3582 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3583 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003584
Amrani, Ramb6acd712017-04-27 13:35:35 +03003585 } else {
3586 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3587 if (wc->status == IB_WC_GENERAL_ERR)
3588 DP_ERR(dev,
3589 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3590 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003591 }
3592
Amrani, Ramb6acd712017-04-27 13:35:35 +03003593 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003594 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003595 wc->src_qp = qp->id;
3596 wc->qp = &qp->ibqp;
3597 wc->wr_id = wr_id;
3598}
3599
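/* Complete one RQ work request: build the WC from the responder CQE,
 * release the consumed RQ chain elements and advance the SW consumer.
 * Returns 1 as exactly one WC is generated.
 */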
3600static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3601 struct qedr_cq *cq, struct ib_wc *wc,
3602 struct rdma_cqe_responder *resp)
3603{
3604 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3605
3606 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3607
3608 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3609 qed_chain_consume(&qp->rq.pbl);
3610 qedr_inc_sw_cons(&qp->rq);
3611
3612 return 1;
3613}
3614
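/* Flush outstanding RQ work requests up to hw_cons, generating at most
 * num_entries WCs with IB_WC_WR_FLUSH_ERR. Returns the number of WCs filled.
 */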
3615static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3616 int num_entries, struct ib_wc *wc, u16 hw_cons)
3617{
3618 u16 cnt = 0;
3619
3620 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3621 /* fill WC */
3622 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003623 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003624 wc->wc_flags = 0;
3625 wc->src_qp = qp->id;
3626 wc->byte_len = 0;
3627 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3628 wc->qp = &qp->ibqp;
3629 num_entries--;
3630 wc++;
3631 cnt++;
3632 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3633 qed_chain_consume(&qp->rq.pbl);
3634 qedr_inc_sw_cons(&qp->rq);
3635 }
3636
3637 return cnt;
3638}
3639
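/* The flush CQE may cover several RQ WRs; only consume it once the SW
 * consumer has caught up with the FW rq_cons, and flag the doorbell update.
 */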
3640static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3641 struct rdma_cqe_responder *resp, int *update)
3642{
3643 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3644 consume_cqe(cq);
3645 *update |= 1;
3646 }
3647}
3648
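/* Handle a responder CQE: a flush status drains the RQ up to the FW
 * consumer, anything else completes a single receive. Returns the number of
 * WCs generated and sets *update once the CQE has been consumed.
 */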
3649static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3650 struct qedr_cq *cq, int num_entries,
3651 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3652 int *update)
3653{
3654 int cnt;
3655
3656 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3657 cnt = process_resp_flush(qp, cq, num_entries, wc,
3658 resp->rq_cons);
3659 try_consume_resp_cqe(cq, qp, resp, update);
3660 } else {
3661 cnt = process_resp_one(dev, qp, cq, wc, resp);
3662 consume_cqe(cq);
3663 *update |= 1;
3664 }
3665
3666 return cnt;
3667}
3668
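/* A requester CQE may complete several SQ WRs; consume it only once the SW
 * consumer has caught up with the FW sq_cons, and flag the doorbell update.
 */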
3669static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3670 struct rdma_cqe_requester *req, int *update)
3671{
3672 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3673 consume_cqe(cq);
3674 *update |= 1;
3675 }
3676}
3677
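/* Poll up to num_entries completions from the CQ. GSI CQs are handled by
 * the dedicated GSI path. Otherwise, valid CQEs are processed until the
 * toggle bit indicates there are no more, and the CQ doorbell is updated
 * with the new consumer index if anything was consumed.
 */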
3678int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3679{
3680 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3681 struct qedr_cq *cq = get_qedr_cq(ibcq);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003682 union rdma_cqe *cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003683 u32 old_cons, new_cons;
3684 unsigned long flags;
3685 int update = 0;
3686 int done = 0;
3687
Amrani, Ram4dd72632017-04-27 13:35:34 +03003688 if (cq->destroyed) {
3689 DP_ERR(dev,
3690 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3691 cq, cq->icid);
3692 return 0;
3693 }
3694
Ram Amrani04886772016-10-10 13:15:38 +03003695 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3696 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3697
Ram Amraniafa0e132016-10-10 13:15:36 +03003698 spin_lock_irqsave(&cq->cq_lock, flags);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003699 cqe = cq->latest_cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003700 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3701 while (num_entries && is_valid_cqe(cq, cqe)) {
3702 struct qedr_qp *qp;
3703 int cnt = 0;
3704
3705 /* prevent speculative reads of any field of CQE */
3706 rmb();
3707
3708 qp = cqe_get_qp(cqe);
3709 if (!qp) {
3710 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3711 break;
3712 }
3713
3714 wc->qp = &qp->ibqp;
3715
3716 switch (cqe_get_type(cqe)) {
3717 case RDMA_CQE_TYPE_REQUESTER:
3718 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3719 &cqe->req);
3720 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3721 break;
3722 case RDMA_CQE_TYPE_RESPONDER_RQ:
3723 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3724 &cqe->resp, &update);
3725 break;
3726 case RDMA_CQE_TYPE_INVALID:
3727 default:
3728 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3729 cqe_get_type(cqe));
3730 }
3731 num_entries -= cnt;
3732 wc += cnt;
3733 done += cnt;
3734
3735 cqe = get_cqe(cq);
3736 }
3737 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3738
3739 cq->cq_cons += new_cons - old_cons;
3740
3741 if (update)
3742		/* The doorbell notifies about the latest VALID entry,
3743		 * but the chain already points to the next INVALID one.
3744		 */
3745 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3746
3747 spin_unlock_irqrestore(&cq->cq_lock, flags);
3748 return done;
3749}
Ram Amrani993d1b52016-10-10 13:15:39 +03003750
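/* No real MAD processing is performed; the incoming MAD header is dumped
 * for debug and IB_MAD_RESULT_SUCCESS is returned.
 */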
3751int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3752 u8 port_num,
3753 const struct ib_wc *in_wc,
3754 const struct ib_grh *in_grh,
3755 const struct ib_mad_hdr *mad_hdr,
3756 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3757 size_t *out_mad_size, u16 *out_mad_pkey_index)
3758{
3759 struct qedr_dev *dev = get_qedr_dev(ibdev);
3760
3761 DP_DEBUG(dev, QEDR_MSG_GSI,
3762 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3763 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3764 mad_hdr->class_specific, mad_hdr->class_version,
3765 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3766 return IB_MAD_RESULT_SUCCESS;
3767}