/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"

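/* Shift a doorbell offset into the position expected when forming PWM
 * doorbell addresses (see DB_PWM_ADDR_OFFSET_SHIFT).
 */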
#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

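/* RoCE does not use a real P_Key table; report the single default P_Key for
 * every valid index.
 */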
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

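/* Map an Ethernet link speed (in Mbps) to the closest IB speed/width pair
 * reported through ib_port_attr.
 */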
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr being zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	attr->gid_tbl_len = QEDR_MAX_SGID;
	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

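/* Remember a physical range that userspace is allowed to mmap() for this
 * ucontext (e.g. the doorbell BAR); qedr_mmap() later validates requests
 * against this list.
 */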
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);

	return found;
}

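/* Allocate a user context: reserve a DPI (doorbell page) for the process and
 * return the doorbell BAR location plus queue-size limits through udata.
 */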
struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

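/* mmap() handler for userspace: only ranges previously registered via
 * qedr_add_mmap() are allowed. Doorbell BAR pages are mapped write-combined
 * and write-only; other registered ranges are mapped as regular memory.
 */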
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		uresp.pd_id = pd_id;

		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

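/* Work out how many PBL pages are needed to map @num_pbes page entries and
 * whether a single-layer or two-layer PBL is required, within the FW page
 * size limits.
 */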
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 (points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

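/* Walk the umem scatterlist and write the DMA address of every page into the
 * PBL pages prepared by qedr_prepare_pbl_tbl()/qedr_alloc_pbl_tbl().
 */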
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	int entry;
	u32 addr;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
					      (pg_cnt << shift));
			addr = upper_32_bits(sg_dma_address(sg) +
					     (pg_cnt << shift));
			pbe->hi = cpu_to_le32(addr);
			pbe_cnt++;
			total_num_pbes++;
			pbe++;

			if (total_num_pbes == pbl_info->num_pbes)
				return;

			/* If the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct regpair *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

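/* Advance the CQ consumer pointer; flip the PBL toggle mask when passing the
 * last element so the driver can tell valid CQEs from stale ones.
 */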
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

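/* Round the requested CQE count up so the CQ buffer fills whole pages; one
 * extra entry is reserved for the driver and not reported to the FW.
 */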
static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
{
	int page_cnt;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	page_cnt = ib_umem_page_count(q->umem);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
	if (rc)
		goto err0;

	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
	if (IS_ERR(q->pbl_tbl)) {
		rc = PTR_ERR(q->pbl_tbl);
		goto err0;
	}

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);

	return 0;

err0:
	ib_umem_release(q->umem);

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

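/* Ring the CQ doorbell with the given consumer index and arm flags, ordering
 * the CQE accesses before the doorbell write reaches the HW.
 */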
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);

	/* GSIs CQs are handled by driver, so they don't exist in the FW */
	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
		int rc;

		iparams.icid = cq->icid;
		rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
					       &oparams);
		if (rc)
			return rc;
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	kfree(cq);

	return 0;
}

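/* Resolve the source GID referenced by the AH attributes into the qed modify
 * parameters: copy the SGID/DGID, pick RoCE v1 vs v2 (IPv4/IPv6) and the
 * VLAN id, and convert the GID words to host byte order.
 */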
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
			       attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(&uresp, qp);
	qedr_copy_rq_uresp(&uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int rc = -EINVAL;

	ib_ctx = ibpd->uobject->context;
	ctx = get_qedr_ucontext(ib_ctx);

	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0);

	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	int rc;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl);

	if (rc)
		return rc;

	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl);
	if (rc)
		return rc;

	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_roce_db_info(dev, qp);

	return 0;
}

static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static int qedr_create_kernel_qp(struct qedr_dev *dev,
				 struct qedr_qp *qp,
				 struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	int rc = -EINVAL;
	u32 n_rq_elems;
	u32 n_sq_elems;
	u32 n_sq_entries;

	memset(&in_params, 0, sizeof(in_params));

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 * We should add an extra WR since the prod and cons indices of
	 * wqe_wr_id are managed in such a way that the WQ is considered full
	 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far
	 * more WRs than indicated. If we decline its ib_post_send() then we
	 * get error prints in the dmesg we'd like to avoid.
	 */
	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
			      dev->attr.max_sqe);

	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
		return -ENOMEM;
	}

	/* QP handle to be written in CQE */
	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);

	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		DP_ERR(dev,
		       "create qp: failed RQ shadow memory allocation\n");
		kfree(qp->wqe_wr_id);
		return -ENOMEM;
	}

	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);

	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;

	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;

	rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
					n_sq_elems, n_rq_elems);
	if (rc)
		qedr_cleanup_kernel(dev, qp);

	return rc;
}

struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_qp *qp;
	struct ib_qp *ibqp;
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
		 udata ? "user library" : "kernel", pd);

	rc = qedr_check_qp_attrs(ibpd, dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	if (attrs->srq)
		return ERR_PTR(-EINVAL);

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: called from %s, event_handler=%p, pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
		 udata ? "user library" : "kernel", attrs->event_handler, pd,
		 get_qedr_cq(attrs->send_cq),
		 get_qedr_cq(attrs->send_cq)->icid,
		 get_qedr_cq(attrs->recv_cq),
		 get_qedr_cq(attrs->recv_cq)->icid);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		DP_ERR(dev, "create qp: failed allocating memory\n");
		return ERR_PTR(-ENOMEM);
	}

	qedr_set_common_qp_params(dev, qp, pd, attrs);

	if (attrs->qp_type == IB_QPT_GSI) {
		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
		if (IS_ERR(ibqp))
			kfree(qp);
		return ibqp;
	}

	if (udata)
		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
	else
		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);

	if (rc)
		goto err;

	qp->ibqp.qp_num = qp->qp_id;

	return &qp->ibqp;

err:
	kfree(qp);

	return ERR_PTR(-EFAULT);
}
1531
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001532static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001533{
1534 switch (qp_state) {
1535 case QED_ROCE_QP_STATE_RESET:
1536 return IB_QPS_RESET;
1537 case QED_ROCE_QP_STATE_INIT:
1538 return IB_QPS_INIT;
1539 case QED_ROCE_QP_STATE_RTR:
1540 return IB_QPS_RTR;
1541 case QED_ROCE_QP_STATE_RTS:
1542 return IB_QPS_RTS;
1543 case QED_ROCE_QP_STATE_SQD:
1544 return IB_QPS_SQD;
1545 case QED_ROCE_QP_STATE_ERR:
1546 return IB_QPS_ERR;
1547 case QED_ROCE_QP_STATE_SQE:
1548 return IB_QPS_SQE;
1549 }
1550 return IB_QPS_ERR;
1551}
1552
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001553static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1554 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001555{
1556 switch (qp_state) {
1557 case IB_QPS_RESET:
1558 return QED_ROCE_QP_STATE_RESET;
1559 case IB_QPS_INIT:
1560 return QED_ROCE_QP_STATE_INIT;
1561 case IB_QPS_RTR:
1562 return QED_ROCE_QP_STATE_RTR;
1563 case IB_QPS_RTS:
1564 return QED_ROCE_QP_STATE_RTS;
1565 case IB_QPS_SQD:
1566 return QED_ROCE_QP_STATE_SQD;
1567 case IB_QPS_ERR:
1568 return QED_ROCE_QP_STATE_ERR;
1569 default:
1570 return QED_ROCE_QP_STATE_ERR;
1571 }
1572}
1573
1574static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1575{
1576 qed_chain_reset(&qph->pbl);
1577 qph->prod = 0;
1578 qph->cons = 0;
1579 qph->wqe_cons = 0;
1580 qph->db_data.data.value = cpu_to_le16(0);
1581}
1582
1583static int qedr_update_qp_state(struct qedr_dev *dev,
1584 struct qedr_qp *qp,
1585 enum qed_roce_qp_state new_state)
1586{
1587 int status = 0;
1588
1589 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001590 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001591
1592 switch (qp->state) {
1593 case QED_ROCE_QP_STATE_RESET:
1594 switch (new_state) {
1595 case QED_ROCE_QP_STATE_INIT:
1596 qp->prev_wqe_size = 0;
1597 qedr_reset_qp_hwq_info(&qp->sq);
1598 qedr_reset_qp_hwq_info(&qp->rq);
1599 break;
1600 default:
1601 status = -EINVAL;
1602 break;
1603 };
1604 break;
1605 case QED_ROCE_QP_STATE_INIT:
1606 switch (new_state) {
1607 case QED_ROCE_QP_STATE_RTR:
1608 /* Update doorbell (in case post_recv was
1609 * done before move to RTR)
1610 */
1611 wmb();
1612 writel(qp->rq.db_data.raw, qp->rq.db);
1613 /* Make sure write takes effect */
1614 mmiowb();
1615 break;
1616 case QED_ROCE_QP_STATE_ERR:
1617 break;
1618 default:
1619 /* Invalid state change. */
1620 status = -EINVAL;
1621 break;
1622 };
1623 break;
1624 case QED_ROCE_QP_STATE_RTR:
1625 /* RTR->XXX */
1626 switch (new_state) {
1627 case QED_ROCE_QP_STATE_RTS:
1628 break;
1629 case QED_ROCE_QP_STATE_ERR:
1630 break;
1631 default:
1632 /* Invalid state change. */
1633 status = -EINVAL;
1634 break;
1635 };
1636 break;
1637 case QED_ROCE_QP_STATE_RTS:
1638 /* RTS->XXX */
1639 switch (new_state) {
1640 case QED_ROCE_QP_STATE_SQD:
1641 break;
1642 case QED_ROCE_QP_STATE_ERR:
1643 break;
1644 default:
1645 /* Invalid state change. */
1646 status = -EINVAL;
1647 break;
1648 };
1649 break;
1650 case QED_ROCE_QP_STATE_SQD:
1651 /* SQD->XXX */
1652 switch (new_state) {
1653 case QED_ROCE_QP_STATE_RTS:
1654 case QED_ROCE_QP_STATE_ERR:
1655 break;
1656 default:
1657 /* Invalid state change. */
1658 status = -EINVAL;
1659 break;
1660 };
1661 break;
1662 case QED_ROCE_QP_STATE_ERR:
1663 /* ERR->XXX */
1664 switch (new_state) {
1665 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001666 if ((qp->rq.prod != qp->rq.cons) ||
1667 (qp->sq.prod != qp->sq.cons)) {
1668 DP_NOTICE(dev,
1669 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1670 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1671 qp->sq.cons);
1672 status = -EINVAL;
1673 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001674 break;
1675 default:
1676 status = -EINVAL;
1677 break;
1678 };
1679 break;
1680 default:
1681 status = -EINVAL;
1682 break;
1683 };
1684
1685 return status;
1686}
1687
1688int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1689 int attr_mask, struct ib_udata *udata)
1690{
1691 struct qedr_qp *qp = get_qedr_qp(ibqp);
1692 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1693 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1694 enum ib_qp_state old_qp_state, new_qp_state;
1695 int rc = 0;
1696
1697 DP_DEBUG(dev, QEDR_MSG_QP,
1698 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1699 attr->qp_state);
1700
1701 old_qp_state = qedr_get_ibqp_state(qp->state);
1702 if (attr_mask & IB_QP_STATE)
1703 new_qp_state = attr->qp_state;
1704 else
1705 new_qp_state = old_qp_state;
1706
1707 if (!ib_modify_qp_is_ok
1708 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1709 IB_LINK_LAYER_ETHERNET)) {
1710 DP_ERR(dev,
1711 "modify qp: invalid attribute mask=0x%x specified for\n"
1712 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1713 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1714 new_qp_state);
1715 rc = -EINVAL;
1716 goto err;
1717 }
1718
1719 /* Translate the masks... */
1720 if (attr_mask & IB_QP_STATE) {
1721 SET_FIELD(qp_params.modify_flags,
1722 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1723 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1724 }
1725
1726 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1727 qp_params.sqd_async = true;
1728
1729 if (attr_mask & IB_QP_PKEY_INDEX) {
1730 SET_FIELD(qp_params.modify_flags,
1731 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1732 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1733 rc = -EINVAL;
1734 goto err;
1735 }
1736
1737 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1738 }
1739
1740 if (attr_mask & IB_QP_QKEY)
1741 qp->qkey = attr->qkey;
1742
1743 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1744 SET_FIELD(qp_params.modify_flags,
1745 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1746 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1747 IB_ACCESS_REMOTE_READ;
1748 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1749 IB_ACCESS_REMOTE_WRITE;
1750 qp_params.incoming_atomic_en = attr->qp_access_flags &
1751 IB_ACCESS_REMOTE_ATOMIC;
1752 }
1753
1754 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1755 if (attr_mask & IB_QP_PATH_MTU) {
1756 if (attr->path_mtu < IB_MTU_256 ||
1757 attr->path_mtu > IB_MTU_4096) {
1758 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1759 rc = -EINVAL;
1760 goto err;
1761 }
1762 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1763 ib_mtu_enum_to_int(iboe_get_mtu
1764 (dev->ndev->mtu)));
1765 }
1766
1767 if (!qp->mtu) {
1768 qp->mtu =
1769 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1770 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1771 }
1772
1773 SET_FIELD(qp_params.modify_flags,
1774 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1775
1776 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1777 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1778 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1779
1780 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1781
1782 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1783 if (rc) {
1784 DP_ERR(dev,
1785 "modify qp: problems with GID index %d (rc=%d)\n",
1786 attr->ah_attr.grh.sgid_index, rc);
1787 return rc;
1788 }
1789
1790 rc = qedr_get_dmac(dev, &attr->ah_attr,
1791 qp_params.remote_mac_addr);
1792 if (rc)
1793 return rc;
1794
1795 qp_params.use_local_mac = true;
1796 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1797
1798 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1799 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1800 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1801 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1802 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1803 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1804 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1805 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001806
1807 qp_params.mtu = qp->mtu;
1808 qp_params.lb_indication = false;
1809 }
1810
1811 if (!qp_params.mtu) {
1812 /* Stay with current MTU */
1813 if (qp->mtu)
1814 qp_params.mtu = qp->mtu;
1815 else
1816 qp_params.mtu =
1817 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1818 }
1819
1820 if (attr_mask & IB_QP_TIMEOUT) {
1821 SET_FIELD(qp_params.modify_flags,
1822 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1823
1824 qp_params.ack_timeout = attr->timeout;
1825 if (attr->timeout) {
1826 u32 temp;
1827
1828 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1829			/* IB timeout is 4.096 usec * 2^attr->timeout; FW expects msec */
1830 qp_params.ack_timeout = temp;
1831 } else {
1832 /* Infinite */
1833 qp_params.ack_timeout = 0;
1834 }
1835 }
1836 if (attr_mask & IB_QP_RETRY_CNT) {
1837 SET_FIELD(qp_params.modify_flags,
1838 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1839 qp_params.retry_cnt = attr->retry_cnt;
1840 }
1841
1842 if (attr_mask & IB_QP_RNR_RETRY) {
1843 SET_FIELD(qp_params.modify_flags,
1844 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1845 qp_params.rnr_retry_cnt = attr->rnr_retry;
1846 }
1847
1848 if (attr_mask & IB_QP_RQ_PSN) {
1849 SET_FIELD(qp_params.modify_flags,
1850 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1851 qp_params.rq_psn = attr->rq_psn;
1852 qp->rq_psn = attr->rq_psn;
1853 }
1854
1855 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1856 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1857 rc = -EINVAL;
1858 DP_ERR(dev,
1859 "unsupported max_rd_atomic=%d, supported=%d\n",
1860 attr->max_rd_atomic,
1861 dev->attr.max_qp_req_rd_atomic_resc);
1862 goto err;
1863 }
1864
1865 SET_FIELD(qp_params.modify_flags,
1866 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1867 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1868 }
1869
1870 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1871 SET_FIELD(qp_params.modify_flags,
1872 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1873 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1874 }
1875
1876 if (attr_mask & IB_QP_SQ_PSN) {
1877 SET_FIELD(qp_params.modify_flags,
1878 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1879 qp_params.sq_psn = attr->sq_psn;
1880 qp->sq_psn = attr->sq_psn;
1881 }
1882
1883 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1884 if (attr->max_dest_rd_atomic >
1885 dev->attr.max_qp_resp_rd_atomic_resc) {
1886 DP_ERR(dev,
1887 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1888 attr->max_dest_rd_atomic,
1889 dev->attr.max_qp_resp_rd_atomic_resc);
1890
1891 rc = -EINVAL;
1892 goto err;
1893 }
1894
1895 SET_FIELD(qp_params.modify_flags,
1896 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1897 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1898 }
1899
1900 if (attr_mask & IB_QP_DEST_QPN) {
1901 SET_FIELD(qp_params.modify_flags,
1902 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1903
1904 qp_params.dest_qp = attr->dest_qp_num;
1905 qp->dest_qp_num = attr->dest_qp_num;
1906 }
1907
1908 if (qp->qp_type != IB_QPT_GSI)
1909 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1910 qp->qed_qp, &qp_params);
1911
1912 if (attr_mask & IB_QP_STATE) {
1913 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02001914 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001915 qp->state = qp_params.new_state;
1916 }
1917
1918err:
1919 return rc;
1920}
1921
1922static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1923{
1924 int ib_qp_acc_flags = 0;
1925
1926 if (params->incoming_rdma_write_en)
1927 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1928 if (params->incoming_rdma_read_en)
1929 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1930 if (params->incoming_atomic_en)
1931 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1932 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1933 return ib_qp_acc_flags;
1934}
1935
1936int qedr_query_qp(struct ib_qp *ibqp,
1937 struct ib_qp_attr *qp_attr,
1938 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1939{
1940 struct qed_rdma_query_qp_out_params params;
1941 struct qedr_qp *qp = get_qedr_qp(ibqp);
1942 struct qedr_dev *dev = qp->dev;
1943 int rc = 0;
1944
1945 memset(&params, 0, sizeof(params));
1946
1947 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
1948 if (rc)
1949 goto err;
1950
1951 memset(qp_attr, 0, sizeof(*qp_attr));
1952 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
1953
1954 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
1955 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02001956 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001957 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1958 qp_attr->rq_psn = params.rq_psn;
1959 qp_attr->sq_psn = params.sq_psn;
1960 qp_attr->dest_qp_num = params.dest_qp;
1961
1962 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
1963
1964 qp_attr->cap.max_send_wr = qp->sq.max_wr;
1965 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
1966 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1967 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02001968 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001969 qp_init_attr->cap = qp_attr->cap;
1970
1971 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
1972 sizeof(qp_attr->ah_attr.grh.dgid.raw));
1973
1974 qp_attr->ah_attr.grh.flow_label = params.flow_label;
1975 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1976 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
1977 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
1978
1979 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1980 qp_attr->ah_attr.port_num = 1;
1981 qp_attr->ah_attr.sl = 0;
1982 qp_attr->timeout = params.timeout;
1983 qp_attr->rnr_retry = params.rnr_retry;
1984 qp_attr->retry_cnt = params.retry_cnt;
1985 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
1986 qp_attr->pkey_index = params.pkey_index;
1987 qp_attr->port_num = 1;
1988 qp_attr->ah_attr.src_path_bits = 0;
1989 qp_attr->ah_attr.static_rate = 0;
1990 qp_attr->alt_pkey_index = 0;
1991 qp_attr->alt_port_num = 0;
1992 qp_attr->alt_timeout = 0;
1993 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1994
1995 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
1996 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
1997 qp_attr->max_rd_atomic = params.max_rd_atomic;
1998 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
1999
2000 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2001 qp_attr->cap.max_inline_data);
2002
2003err:
2004 return rc;
2005}
2006
Amrani, Ramdf158562016-12-22 14:52:24 +02002007int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2008{
2009 int rc = 0;
2010
2011 if (qp->qp_type != IB_QPT_GSI) {
2012 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2013 if (rc)
2014 return rc;
2015 }
2016
2017 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2018 qedr_cleanup_user(dev, qp);
2019 else
2020 qedr_cleanup_kernel(dev, qp);
2021
2022 return 0;
2023}
2024
Ram Amranicecbcdd2016-10-10 13:15:34 +03002025int qedr_destroy_qp(struct ib_qp *ibqp)
2026{
2027 struct qedr_qp *qp = get_qedr_qp(ibqp);
2028 struct qedr_dev *dev = qp->dev;
2029 struct ib_qp_attr attr;
2030 int attr_mask = 0;
2031 int rc = 0;
2032
2033 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2034 qp, qp->qp_type);
2035
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002036 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2037 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2038 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2039
Ram Amranicecbcdd2016-10-10 13:15:34 +03002040 attr.qp_state = IB_QPS_ERR;
2041 attr_mask |= IB_QP_STATE;
2042
2043 /* Change the QP state to ERROR */
2044 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2045 }
2046
Amrani, Ramdf158562016-12-22 14:52:24 +02002047 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002048 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002049
Amrani, Ramdf158562016-12-22 14:52:24 +02002050 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002051
2052 kfree(qp);
2053
2054 return rc;
2055}
Ram Amranie0290cc2016-10-10 13:15:35 +03002056
Moni Shoua477864c2016-11-23 08:23:24 +02002057struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2058 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002059{
2060 struct qedr_ah *ah;
2061
2062 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2063 if (!ah)
2064 return ERR_PTR(-ENOMEM);
2065
2066 ah->attr = *attr;
2067
2068 return &ah->ibah;
2069}
2070
2071int qedr_destroy_ah(struct ib_ah *ibah)
2072{
2073 struct qedr_ah *ah = get_qedr_ah(ibah);
2074
2075 kfree(ah);
2076 return 0;
2077}
2078
Ram Amranie0290cc2016-10-10 13:15:35 +03002079static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2080{
2081 struct qedr_pbl *pbl, *tmp;
2082
2083 if (info->pbl_table)
2084 list_add_tail(&info->pbl_table->list_entry,
2085 &info->free_pbl_list);
2086
2087 if (!list_empty(&info->inuse_pbl_list))
2088 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2089
2090 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2091 list_del(&pbl->list_entry);
2092 qedr_free_pbl(dev, &info->pbl_info, pbl);
2093 }
2094}
2095
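/* Prepare the PBL bookkeeping of an MR: compute the table geometry with
 * qedr_prepare_pbl_tbl(), allocate the first PBL, and try to put one spare
 * PBL on the free list (two PBLs are typically used); failure to allocate
 * the spare is not treated as fatal.
 */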
2096static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2097 size_t page_list_len, bool two_layered)
2098{
2099 struct qedr_pbl *tmp;
2100 int rc;
2101
2102 INIT_LIST_HEAD(&info->free_pbl_list);
2103 INIT_LIST_HEAD(&info->inuse_pbl_list);
2104
2105 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2106 page_list_len, two_layered);
2107 if (rc)
2108 goto done;
2109
2110 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002111 if (IS_ERR(info->pbl_table)) {
2112 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002113 goto done;
2114 }
2115
2116 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2117 &info->pbl_table->pa);
2118
2119	/* in the usual case we use 2 PBLs, so we add one to the free
2120	 * list and allocate another one
2121 */
2122 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002123 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002124 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2125 goto done;
2126 }
2127
2128 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2129
2130 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2131
2132done:
2133 if (rc)
2134 free_mr_info(dev, info);
2135
2136 return rc;
2137}
2138
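/* Register a user MR: pin the user buffer with ib_umem_get(), build the
 * page-buffer list (PBL) describing it, allocate a TID from the qed core and
 * register it. The resulting lkey is (itid << 8) | key; an rkey is exposed
 * only when remote access was requested.
 */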
2139struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2140 u64 usr_addr, int acc, struct ib_udata *udata)
2141{
2142 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2143 struct qedr_mr *mr;
2144 struct qedr_pd *pd;
2145 int rc = -ENOMEM;
2146
2147 pd = get_qedr_pd(ibpd);
2148 DP_DEBUG(dev, QEDR_MSG_MR,
2149 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2150 pd->pd_id, start, len, usr_addr, acc);
2151
2152 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2153 return ERR_PTR(-EINVAL);
2154
2155 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2156 if (!mr)
2157 return ERR_PTR(rc);
2158
2159 mr->type = QEDR_MR_USER;
2160
2161 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2162 if (IS_ERR(mr->umem)) {
2163 rc = -EFAULT;
2164 goto err0;
2165 }
2166
2167 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2168 if (rc)
2169 goto err1;
2170
2171 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2172 &mr->info.pbl_info);
2173
2174 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2175 if (rc) {
2176 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2177 goto err1;
2178 }
2179
2180 /* Index only, 18 bit long, lkey = itid << 8 | key */
2181 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2182 mr->hw_mr.key = 0;
2183 mr->hw_mr.pd = pd->pd_id;
2184 mr->hw_mr.local_read = 1;
2185 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2186 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2187 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2188 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2189 mr->hw_mr.mw_bind = false;
2190 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2191 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2192 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002193 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002194 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2195 mr->hw_mr.length = len;
2196 mr->hw_mr.vaddr = usr_addr;
2197 mr->hw_mr.zbva = false;
2198 mr->hw_mr.phy_mr = false;
2199 mr->hw_mr.dma_mr = false;
2200
2201 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2202 if (rc) {
2203 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2204 goto err2;
2205 }
2206
2207 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2208 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2209 mr->hw_mr.remote_atomic)
2210 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2211
2212 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2213 mr->ibmr.lkey);
2214 return &mr->ibmr;
2215
2216err2:
2217 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2218err1:
2219 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2220err0:
2221 kfree(mr);
2222 return ERR_PTR(rc);
2223}
2224
2225int qedr_dereg_mr(struct ib_mr *ib_mr)
2226{
2227 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2228 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2229 int rc = 0;
2230
2231 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2232 if (rc)
2233 return rc;
2234
2235 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2236
2237 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2238 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2239
2240 /* it could be user registered memory. */
2241 if (mr->umem)
2242 ib_umem_release(mr->umem);
2243
2244 kfree(mr);
2245
2246 return rc;
2247}
2248
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002249static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2250 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002251{
2252 struct qedr_pd *pd = get_qedr_pd(ibpd);
2253 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2254 struct qedr_mr *mr;
2255 int rc = -ENOMEM;
2256
2257 DP_DEBUG(dev, QEDR_MSG_MR,
2258 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2259 max_page_list_len);
2260
2261 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2262 if (!mr)
2263 return ERR_PTR(rc);
2264
2265 mr->dev = dev;
2266 mr->type = QEDR_MR_FRMR;
2267
2268 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2269 if (rc)
2270 goto err0;
2271
2272 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2273 if (rc) {
2274 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2275 goto err0;
2276 }
2277
2278 /* Index only, 18 bit long, lkey = itid << 8 | key */
2279 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2280 mr->hw_mr.key = 0;
2281 mr->hw_mr.pd = pd->pd_id;
2282 mr->hw_mr.local_read = 1;
2283 mr->hw_mr.local_write = 0;
2284 mr->hw_mr.remote_read = 0;
2285 mr->hw_mr.remote_write = 0;
2286 mr->hw_mr.remote_atomic = 0;
2287 mr->hw_mr.mw_bind = false;
2288 mr->hw_mr.pbl_ptr = 0;
2289 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2290 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2291 mr->hw_mr.fbo = 0;
2292 mr->hw_mr.length = 0;
2293 mr->hw_mr.vaddr = 0;
2294 mr->hw_mr.zbva = false;
2295 mr->hw_mr.phy_mr = true;
2296 mr->hw_mr.dma_mr = false;
2297
2298 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2299 if (rc) {
2300 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2301 goto err1;
2302 }
2303
2304 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2305 mr->ibmr.rkey = mr->ibmr.lkey;
2306
2307 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2308 return mr;
2309
2310err1:
2311 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2312err0:
2313 kfree(mr);
2314 return ERR_PTR(rc);
2315}
2316
2317struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2318 enum ib_mr_type mr_type, u32 max_num_sg)
2319{
2320 struct qedr_dev *dev;
2321 struct qedr_mr *mr;
2322
2323 if (mr_type != IB_MR_TYPE_MEM_REG)
2324 return ERR_PTR(-EINVAL);
2325
2326 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2327
2328 if (IS_ERR(mr))
2329 return ERR_PTR(-EINVAL);
2330
2331 dev = mr->dev;
2332
2333 return &mr->ibmr;
2334}
2335
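/* Callback used by ib_sg_to_pages(): write the next page address into the
 * proper PBL entry (lo/hi dwords, little-endian) and advance mr->npages.
 */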
2336static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2337{
2338 struct qedr_mr *mr = get_qedr_mr(ibmr);
2339 struct qedr_pbl *pbl_table;
2340 struct regpair *pbe;
2341 u32 pbes_in_page;
2342
2343 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2344		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2345 return -ENOMEM;
2346 }
2347
2348 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2349 mr->npages, addr);
2350
2351 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2352 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2353 pbe = (struct regpair *)pbl_table->va;
2354 pbe += mr->npages % pbes_in_page;
2355 pbe->lo = cpu_to_le32((u32)addr);
2356 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2357
2358 mr->npages++;
2359
2360 return 0;
2361}
2362
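/* Recycle PBLs of completed fast-reg MRs: move up to
 * (completed - completed_handled - 1) entries from the in-use list back to
 * the free list. One completion is deliberately held back, presumably so the
 * PBL of the most recent fast-reg is not reused too early.
 */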
2363static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2364{
2365 int work = info->completed - info->completed_handled - 1;
2366
2367 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2368 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2369 struct qedr_pbl *pbl;
2370
2371		/* Free all the page lists that can be freed
2372		 * (all the ones that were invalidated), under the assumption
2373		 * that if an FMR completed successfully, any invalidate
2374		 * operation posted before it has completed as well
2375 */
2376 pbl = list_first_entry(&info->inuse_pbl_list,
2377 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002378 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002379 info->completed_handled++;
2380 }
2381}
2382
2383int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2384 int sg_nents, unsigned int *sg_offset)
2385{
2386 struct qedr_mr *mr = get_qedr_mr(ibmr);
2387
2388 mr->npages = 0;
2389
2390 handle_completed_mrs(mr->dev, &mr->info);
2391 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2392}
2393
2394struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2395{
2396 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2397 struct qedr_pd *pd = get_qedr_pd(ibpd);
2398 struct qedr_mr *mr;
2399 int rc;
2400
2401 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2402 if (!mr)
2403 return ERR_PTR(-ENOMEM);
2404
2405 mr->type = QEDR_MR_DMA;
2406
2407 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2408 if (rc) {
2409 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2410 goto err1;
2411 }
2412
2413 /* index only, 18 bit long, lkey = itid << 8 | key */
2414 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2415 mr->hw_mr.pd = pd->pd_id;
2416 mr->hw_mr.local_read = 1;
2417 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2418 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2419 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2420 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2421 mr->hw_mr.dma_mr = true;
2422
2423 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2424 if (rc) {
2425 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2426 goto err2;
2427 }
2428
2429 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2430 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2431 mr->hw_mr.remote_atomic)
2432 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2433
2434 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2435 return &mr->ibmr;
2436
2437err2:
2438 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2439err1:
2440 kfree(mr);
2441 return ERR_PTR(rc);
2442}
Ram Amraniafa0e132016-10-10 13:15:36 +03002443
2444static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2445{
2446 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2447}
2448
2449static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2450{
2451 int i, len = 0;
2452
2453 for (i = 0; i < num_sge; i++)
2454 len += sg_list[i].length;
2455
2456 return len;
2457}
2458
2459static void swap_wqe_data64(u64 *p)
2460{
2461 int i;
2462
2463 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2464 *p = cpu_to_be64(cpu_to_le64(*p));
2465}
2466
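/* Copy the payload of an inline WR directly into SQ WQE elements: the data
 * is packed segment by segment (a new chain element is produced whenever the
 * current one fills up) and each fully written segment is byte-swapped with
 * swap_wqe_data64(). Returns the total data size, or 0 with *bad_wr set when
 * the data exceeds ROCE_REQ_MAX_INLINE_DATA_SIZE.
 */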
2467static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2468 struct qedr_qp *qp, u8 *wqe_size,
2469 struct ib_send_wr *wr,
2470 struct ib_send_wr **bad_wr, u8 *bits,
2471 u8 bit)
2472{
2473 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2474 char *seg_prt, *wqe;
2475 int i, seg_siz;
2476
2477 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2478 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2479 *bad_wr = wr;
2480 return 0;
2481 }
2482
2483 if (!data_size)
2484 return data_size;
2485
2486 *bits |= bit;
2487
2488 seg_prt = NULL;
2489 wqe = NULL;
2490 seg_siz = 0;
2491
2492 /* Copy data inline */
2493 for (i = 0; i < wr->num_sge; i++) {
2494 u32 len = wr->sg_list[i].length;
2495 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2496
2497 while (len > 0) {
2498 u32 cur;
2499
2500 /* New segment required */
2501 if (!seg_siz) {
2502 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2503 seg_prt = wqe;
2504 seg_siz = sizeof(struct rdma_sq_common_wqe);
2505 (*wqe_size)++;
2506 }
2507
2508 /* Calculate currently allowed length */
2509 cur = min_t(u32, len, seg_siz);
2510 memcpy(seg_prt, src, cur);
2511
2512 /* Update segment variables */
2513 seg_prt += cur;
2514 seg_siz -= cur;
2515
2516 /* Update sge variables */
2517 src += cur;
2518 len -= cur;
2519
2520 /* Swap fully-completed segments */
2521 if (!seg_siz)
2522 swap_wqe_data64((u64 *)wqe);
2523 }
2524 }
2525
2526	/* swap the last, not fully completed, segment */
2527 if (seg_siz)
2528 swap_wqe_data64((u64 *)wqe);
2529
2530 return data_size;
2531}
2532
2533#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2534 do { \
2535 DMA_REGPAIR_LE(sge->addr, vaddr); \
2536 (sge)->length = cpu_to_le32(vlength); \
2537 (sge)->flags = cpu_to_le32(vflags); \
2538 } while (0)
2539
2540#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2541 do { \
2542 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2543 (hdr)->num_sges = num_sge; \
2544 } while (0)
2545
2546#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2547 do { \
2548 DMA_REGPAIR_LE(sge->addr, vaddr); \
2549 (sge)->length = cpu_to_le32(vlength); \
2550 (sge)->l_key = cpu_to_le32(vlkey); \
2551 } while (0)
2552
2553static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2554 struct ib_send_wr *wr)
2555{
2556 u32 data_size = 0;
2557 int i;
2558
2559 for (i = 0; i < wr->num_sge; i++) {
2560 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2561
2562 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2563 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2564 sge->length = cpu_to_le32(wr->sg_list[i].length);
2565 data_size += wr->sg_list[i].length;
2566 }
2567
2568 if (wqe_size)
2569 *wqe_size += wr->num_sge;
2570
2571 return data_size;
2572}
2573
2574static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2575 struct qedr_qp *qp,
2576 struct rdma_sq_rdma_wqe_1st *rwqe,
2577 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2578 struct ib_send_wr *wr,
2579 struct ib_send_wr **bad_wr)
2580{
2581 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2582 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2583
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002584 if (wr->send_flags & IB_SEND_INLINE &&
2585 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2586 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002587 u8 flags = 0;
2588
2589 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2590 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2591 bad_wr, &rwqe->flags, flags);
2592 }
2593
2594 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2595}
2596
2597static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2598 struct qedr_qp *qp,
2599 struct rdma_sq_send_wqe_1st *swqe,
2600 struct rdma_sq_send_wqe_2st *swqe2,
2601 struct ib_send_wr *wr,
2602 struct ib_send_wr **bad_wr)
2603{
2604 memset(swqe2, 0, sizeof(*swqe2));
2605 if (wr->send_flags & IB_SEND_INLINE) {
2606 u8 flags = 0;
2607
2608 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2609 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2610 bad_wr, &swqe->flags, flags);
2611 }
2612
2613 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2614}
2615
2616static int qedr_prepare_reg(struct qedr_qp *qp,
2617 struct rdma_sq_fmr_wqe_1st *fwqe1,
2618 struct ib_reg_wr *wr)
2619{
2620 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2621 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2622
2623 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2624 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2625 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2626 fwqe1->l_key = wr->key;
2627
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002628 fwqe2->access_ctrl = 0;
2629
Ram Amraniafa0e132016-10-10 13:15:36 +03002630 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2631 !!(wr->access & IB_ACCESS_REMOTE_READ));
2632 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2633 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2634 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2635 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2636 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2637 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2638 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2639 fwqe2->fmr_ctrl = 0;
2640
2641 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2642 ilog2(mr->ibmr.page_size) - 12);
2643
2644 fwqe2->length_hi = 0;
2645 fwqe2->length_lo = mr->ibmr.length;
2646 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2647 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2648
2649 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2650
2651 return 0;
2652}
2653
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002654static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002655{
2656 switch (opcode) {
2657 case IB_WR_RDMA_WRITE:
2658 case IB_WR_RDMA_WRITE_WITH_IMM:
2659 return IB_WC_RDMA_WRITE;
2660 case IB_WR_SEND_WITH_IMM:
2661 case IB_WR_SEND:
2662 case IB_WR_SEND_WITH_INV:
2663 return IB_WC_SEND;
2664 case IB_WR_RDMA_READ:
2665 return IB_WC_RDMA_READ;
2666 case IB_WR_ATOMIC_CMP_AND_SWP:
2667 return IB_WC_COMP_SWAP;
2668 case IB_WR_ATOMIC_FETCH_AND_ADD:
2669 return IB_WC_FETCH_ADD;
2670 case IB_WR_REG_MR:
2671 return IB_WC_REG_MR;
2672 case IB_WR_LOCAL_INV:
2673 return IB_WC_LOCAL_INV;
2674 default:
2675 return IB_WC_SEND;
2676 }
2677}
2678
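/* Check that a send WR can be accepted: the SQ must not be full, the WR must
 * not carry more SGEs than the SQ supports, and the SQ PBL must have room
 * for a maximal WQE. Each failure is reported once per QP via err_bitmap.
 */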
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002679static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002680{
2681 int wq_is_full, err_wr, pbl_is_full;
2682 struct qedr_dev *dev = qp->dev;
2683
2684 /* prevent SQ overflow and/or processing of a bad WR */
2685 err_wr = wr->num_sge > qp->sq.max_sges;
2686 wq_is_full = qedr_wq_is_full(&qp->sq);
2687 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2688 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2689 if (wq_is_full || err_wr || pbl_is_full) {
2690 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2691 DP_ERR(dev,
2692 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2693 qp);
2694 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2695 }
2696
2697 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2698 DP_ERR(dev,
2699 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2700 qp);
2701 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2702 }
2703
2704 if (pbl_is_full &&
2705 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2706 DP_ERR(dev,
2707 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2708 qp);
2709 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2710 }
2711 return false;
2712 }
2713 return true;
2714}
2715
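/* Build and enqueue a single send WR: produce the first WQE element, set the
 * common flags (completion, solicited, fence), then fill the opcode-specific
 * WQE(s) for send, RDMA read/write, atomic, local invalidate or fast-reg MR.
 * On a bad WR the SQ producer and prev_wqe_size are rolled back.
 */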
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002716static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002717 struct ib_send_wr **bad_wr)
2718{
2719 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2720 struct qedr_qp *qp = get_qedr_qp(ibqp);
2721 struct rdma_sq_atomic_wqe_1st *awqe1;
2722 struct rdma_sq_atomic_wqe_2nd *awqe2;
2723 struct rdma_sq_atomic_wqe_3rd *awqe3;
2724 struct rdma_sq_send_wqe_2st *swqe2;
2725 struct rdma_sq_local_inv_wqe *iwqe;
2726 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2727 struct rdma_sq_send_wqe_1st *swqe;
2728 struct rdma_sq_rdma_wqe_1st *rwqe;
2729 struct rdma_sq_fmr_wqe_1st *fwqe1;
2730 struct rdma_sq_common_wqe *wqe;
2731 u32 length;
2732 int rc = 0;
2733 bool comp;
2734
2735 if (!qedr_can_post_send(qp, wr)) {
2736 *bad_wr = wr;
2737 return -ENOMEM;
2738 }
2739
2740 wqe = qed_chain_produce(&qp->sq.pbl);
2741 qp->wqe_wr_id[qp->sq.prod].signaled =
2742 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2743
2744 wqe->flags = 0;
2745 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2746 !!(wr->send_flags & IB_SEND_SOLICITED));
2747 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2748 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2749 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2750 !!(wr->send_flags & IB_SEND_FENCE));
2751 wqe->prev_wqe_size = qp->prev_wqe_size;
2752
2753 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2754
2755 switch (wr->opcode) {
2756 case IB_WR_SEND_WITH_IMM:
2757 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2758 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2759 swqe->wqe_size = 2;
2760 swqe2 = qed_chain_produce(&qp->sq.pbl);
2761
2762 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2763 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2764 wr, bad_wr);
2765 swqe->length = cpu_to_le32(length);
2766 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2767 qp->prev_wqe_size = swqe->wqe_size;
2768 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2769 break;
2770 case IB_WR_SEND:
2771 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2772 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2773
2774 swqe->wqe_size = 2;
2775 swqe2 = qed_chain_produce(&qp->sq.pbl);
2776 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2777 wr, bad_wr);
2778 swqe->length = cpu_to_le32(length);
2779 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2780 qp->prev_wqe_size = swqe->wqe_size;
2781 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2782 break;
2783 case IB_WR_SEND_WITH_INV:
2784 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2785 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2786 swqe2 = qed_chain_produce(&qp->sq.pbl);
2787 swqe->wqe_size = 2;
2788 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2789 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2790 wr, bad_wr);
2791 swqe->length = cpu_to_le32(length);
2792 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2793 qp->prev_wqe_size = swqe->wqe_size;
2794 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2795 break;
2796
2797 case IB_WR_RDMA_WRITE_WITH_IMM:
2798 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2799 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2800
2801 rwqe->wqe_size = 2;
2802 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2803 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2804 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2805 wr, bad_wr);
2806 rwqe->length = cpu_to_le32(length);
2807 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2808 qp->prev_wqe_size = rwqe->wqe_size;
2809 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2810 break;
2811 case IB_WR_RDMA_WRITE:
2812 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2813 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2814
2815 rwqe->wqe_size = 2;
2816 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2817 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2818 wr, bad_wr);
2819 rwqe->length = cpu_to_le32(length);
2820 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2821 qp->prev_wqe_size = rwqe->wqe_size;
2822 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2823 break;
2824 case IB_WR_RDMA_READ_WITH_INV:
2825 DP_ERR(dev,
2826 "RDMA READ WITH INVALIDATE not supported\n");
2827 *bad_wr = wr;
2828 rc = -EINVAL;
2829 break;
2830
2831 case IB_WR_RDMA_READ:
2832 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2833 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2834
2835 rwqe->wqe_size = 2;
2836 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2837 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2838 wr, bad_wr);
2839 rwqe->length = cpu_to_le32(length);
2840 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2841 qp->prev_wqe_size = rwqe->wqe_size;
2842 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2843 break;
2844
2845 case IB_WR_ATOMIC_CMP_AND_SWP:
2846 case IB_WR_ATOMIC_FETCH_AND_ADD:
2847 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2848 awqe1->wqe_size = 4;
2849
2850 awqe2 = qed_chain_produce(&qp->sq.pbl);
2851 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2852 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2853
2854 awqe3 = qed_chain_produce(&qp->sq.pbl);
2855
2856 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2857 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2858 DMA_REGPAIR_LE(awqe3->swap_data,
2859 atomic_wr(wr)->compare_add);
2860 } else {
2861 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2862 DMA_REGPAIR_LE(awqe3->swap_data,
2863 atomic_wr(wr)->swap);
2864 DMA_REGPAIR_LE(awqe3->cmp_data,
2865 atomic_wr(wr)->compare_add);
2866 }
2867
2868 qedr_prepare_sq_sges(qp, NULL, wr);
2869
2870 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2871 qp->prev_wqe_size = awqe1->wqe_size;
2872 break;
2873
2874 case IB_WR_LOCAL_INV:
2875 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2876 iwqe->wqe_size = 1;
2877
2878 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2879 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2880 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2881 qp->prev_wqe_size = iwqe->wqe_size;
2882 break;
2883 case IB_WR_REG_MR:
2884 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2885 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2886 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2887 fwqe1->wqe_size = 2;
2888
2889 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2890 if (rc) {
2891 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2892 *bad_wr = wr;
2893 break;
2894 }
2895
2896 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2897 qp->prev_wqe_size = fwqe1->wqe_size;
2898 break;
2899 default:
2900 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2901 rc = -EINVAL;
2902 *bad_wr = wr;
2903 break;
2904 }
2905
2906 if (*bad_wr) {
2907 u16 value;
2908
2909 /* Restore prod to its position before
2910 * this WR was processed
2911 */
2912 value = le16_to_cpu(qp->sq.db_data.data.value);
2913 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2914
2915 /* Restore prev_wqe_size */
2916 qp->prev_wqe_size = wqe->prev_wqe_size;
2917 rc = -EINVAL;
2918 DP_ERR(dev, "POST SEND FAILED\n");
2919 }
2920
2921 return rc;
2922}
2923
2924int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2925 struct ib_send_wr **bad_wr)
2926{
2927 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2928 struct qedr_qp *qp = get_qedr_qp(ibqp);
2929 unsigned long flags;
2930 int rc = 0;
2931
2932 *bad_wr = NULL;
2933
Ram Amrani04886772016-10-10 13:15:38 +03002934 if (qp->qp_type == IB_QPT_GSI)
2935 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2936
Ram Amraniafa0e132016-10-10 13:15:36 +03002937 spin_lock_irqsave(&qp->q_lock, flags);
2938
Amrani, Ram922d9a42016-12-22 14:40:38 +02002939 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
2940 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2941 (qp->state != QED_ROCE_QP_STATE_SQD)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002942 spin_unlock_irqrestore(&qp->q_lock, flags);
2943 *bad_wr = wr;
2944 DP_DEBUG(dev, QEDR_MSG_CQ,
2945 "QP in wrong state! QP icid=0x%x state %d\n",
2946 qp->icid, qp->state);
2947 return -EINVAL;
2948 }
2949
Ram Amraniafa0e132016-10-10 13:15:36 +03002950 while (wr) {
2951 rc = __qedr_post_send(ibqp, wr, bad_wr);
2952 if (rc)
2953 break;
2954
2955 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
2956
2957 qedr_inc_sw_prod(&qp->sq);
2958
2959 qp->sq.db_data.data.value++;
2960
2961 wr = wr->next;
2962 }
2963
2964 /* Trigger doorbell
2965 * If there was a failure in the first WR then it will be triggered in
2966	 * vain. However this is not harmful (as long as the producer value is
2967 * unchanged). For performance reasons we avoid checking for this
2968 * redundant doorbell.
2969 */
2970 wmb();
2971 writel(qp->sq.db_data.raw, qp->sq.db);
2972
2973 /* Make sure write sticks */
2974 mmiowb();
2975
2976 spin_unlock_irqrestore(&qp->q_lock, flags);
2977
2978 return rc;
2979}
2980
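/* Post receive WRs: for each WR produce one RQ SGE per sg entry (the first
 * SGE carries the SGE count), or a single zero-length SGE when the WR has
 * none, since the FW expects between 1 and 4 SGEs per RQE. The RQ doorbell
 * is rung after every WR.
 */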
2981int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2982 struct ib_recv_wr **bad_wr)
2983{
2984 struct qedr_qp *qp = get_qedr_qp(ibqp);
2985 struct qedr_dev *dev = qp->dev;
2986 unsigned long flags;
2987 int status = 0;
2988
Ram Amrani04886772016-10-10 13:15:38 +03002989 if (qp->qp_type == IB_QPT_GSI)
2990 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
2991
Ram Amraniafa0e132016-10-10 13:15:36 +03002992 spin_lock_irqsave(&qp->q_lock, flags);
2993
Amrani, Ram922d9a42016-12-22 14:40:38 +02002994 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002995 spin_unlock_irqrestore(&qp->q_lock, flags);
2996 *bad_wr = wr;
2997 return -EINVAL;
2998 }
2999
3000 while (wr) {
3001 int i;
3002
3003 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3004 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3005 wr->num_sge > qp->rq.max_sges) {
3006 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3007 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3008 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3009 qp->rq.max_sges);
3010 status = -ENOMEM;
3011 *bad_wr = wr;
3012 break;
3013 }
3014 for (i = 0; i < wr->num_sge; i++) {
3015 u32 flags = 0;
3016 struct rdma_rq_sge *rqe =
3017 qed_chain_produce(&qp->rq.pbl);
3018
3019 /* First one must include the number
3020 * of SGE in the list
3021 */
3022 if (!i)
3023 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3024 wr->num_sge);
3025
3026 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3027 wr->sg_list[i].lkey);
3028
3029 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3030 wr->sg_list[i].length, flags);
3031 }
3032
3033		/* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
3034		 * so in this case we need to post 1 SGE with length zero. This is
3035		 * needed because an RDMA write with immediate consumes an RQ entry.
3036 */
3037 if (!wr->num_sge) {
3038 u32 flags = 0;
3039 struct rdma_rq_sge *rqe =
3040 qed_chain_produce(&qp->rq.pbl);
3041
3042 /* First one must include the number
3043 * of SGE in the list
3044 */
3045 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3046 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3047
3048 RQ_SGE_SET(rqe, 0, 0, flags);
3049 i = 1;
3050 }
3051
3052 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3053 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3054
3055 qedr_inc_sw_prod(&qp->rq);
3056
3057 /* Flush all the writes before signalling doorbell */
3058 wmb();
3059
3060 qp->rq.db_data.data.value++;
3061
3062 writel(qp->rq.db_data.raw, qp->rq.db);
3063
3064 /* Make sure write sticks */
3065 mmiowb();
3066
3067 wr = wr->next;
3068 }
3069
3070 spin_unlock_irqrestore(&qp->q_lock, flags);
3071
3072 return status;
3073}
3074
3075static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3076{
3077 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3078
3079 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3080 cq->pbl_toggle;
3081}
3082
3083static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3084{
3085 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3086 struct qedr_qp *qp;
3087
3088 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3089 resp_cqe->qp_handle.lo,
3090 u64);
3091 return qp;
3092}
3093
3094static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3095{
3096 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3097
3098 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3099}
3100
3101/* Return latest CQE (needs processing) */
3102static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3103{
3104 return cq->latest_cqe;
3105}
3106
3107/* For FMR we need to increase the completed counter used by the FMR
3108 * algorithm that determines whether a PBL can be freed or not.
3109 * This must be done whether the work request was signaled or not, so we
3110 * call this function from the condition that checks if a WR should be
3111 * skipped, to make sure we don't miss it (possibly this FMR
3112 * operation was not signaled)
3113 */
3114static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3115{
3116 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3117 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3118}
3119
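/* Walk the SQ from the current consumer up to hw_cons and generate up to
 * num_entries work completions with the given status; unsignaled WRs are
 * skipped unless 'force' is set. Returns the number of WCs filled.
 */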
3120static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3121 struct qedr_cq *cq, int num_entries,
3122 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3123 int force)
3124{
3125 u16 cnt = 0;
3126
3127 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3128 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3129 qedr_chk_if_fmr(qp);
3130 /* skip WC */
3131 goto next_cqe;
3132 }
3133
3134 /* fill WC */
3135 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003136 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003137 wc->wc_flags = 0;
3138 wc->src_qp = qp->id;
3139 wc->qp = &qp->ibqp;
3140
3141 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3142 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3143
3144 switch (wc->opcode) {
3145 case IB_WC_RDMA_WRITE:
3146 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3147 break;
3148 case IB_WC_COMP_SWAP:
3149 case IB_WC_FETCH_ADD:
3150 wc->byte_len = 8;
3151 break;
3152 case IB_WC_REG_MR:
3153 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3154 break;
3155 default:
3156 break;
3157 }
3158
3159 num_entries--;
3160 wc++;
3161 cnt++;
3162next_cqe:
3163 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3164 qed_chain_consume(&qp->sq.pbl);
3165 qedr_inc_sw_cons(&qp->sq);
3166 }
3167
3168 return cnt;
3169}
3170
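/* Handle a requester CQE: on success or flush, complete WRs up to sq_cons;
 * on any other error, complete the preceding WRs as successful and report
 * the failing WR with a WC status mapped from the CQE status.
 */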
3171static int qedr_poll_cq_req(struct qedr_dev *dev,
3172 struct qedr_qp *qp, struct qedr_cq *cq,
3173 int num_entries, struct ib_wc *wc,
3174 struct rdma_cqe_requester *req)
3175{
3176 int cnt = 0;
3177
3178 switch (req->status) {
3179 case RDMA_CQE_REQ_STS_OK:
3180 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3181 IB_WC_SUCCESS, 0);
3182 break;
3183 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003184 if (qp->state != QED_ROCE_QP_STATE_ERR)
3185 DP_ERR(dev,
3186 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3187 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003188 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003189 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003190 break;
3191 default:
3192		/* process all WQEs before the consumer */
3193 qp->state = QED_ROCE_QP_STATE_ERR;
3194 cnt = process_req(dev, qp, cq, num_entries, wc,
3195 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3196 wc += cnt;
3197 /* if we have extra WC fill it with actual error info */
3198 if (cnt < num_entries) {
3199 enum ib_wc_status wc_status;
3200
3201 switch (req->status) {
3202 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3203 DP_ERR(dev,
3204 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3205 cq->icid, qp->icid);
3206 wc_status = IB_WC_BAD_RESP_ERR;
3207 break;
3208 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3209 DP_ERR(dev,
3210 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3211 cq->icid, qp->icid);
3212 wc_status = IB_WC_LOC_LEN_ERR;
3213 break;
3214 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3215 DP_ERR(dev,
3216 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3217 cq->icid, qp->icid);
3218 wc_status = IB_WC_LOC_QP_OP_ERR;
3219 break;
3220 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3221 DP_ERR(dev,
3222 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3223 cq->icid, qp->icid);
3224 wc_status = IB_WC_LOC_PROT_ERR;
3225 break;
3226 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3227 DP_ERR(dev,
3228 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3229 cq->icid, qp->icid);
3230 wc_status = IB_WC_MW_BIND_ERR;
3231 break;
3232 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3233 DP_ERR(dev,
3234 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3235 cq->icid, qp->icid);
3236 wc_status = IB_WC_REM_INV_REQ_ERR;
3237 break;
3238 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3239 DP_ERR(dev,
3240 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3241 cq->icid, qp->icid);
3242 wc_status = IB_WC_REM_ACCESS_ERR;
3243 break;
3244 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3245 DP_ERR(dev,
3246 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3247 cq->icid, qp->icid);
3248 wc_status = IB_WC_REM_OP_ERR;
3249 break;
3250 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3251 DP_ERR(dev,
3252 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3253 cq->icid, qp->icid);
3254 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3255 break;
3256 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3257 DP_ERR(dev,
3258 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
3260 wc_status = IB_WC_RETRY_EXC_ERR;
3261 break;
3262 default:
3263 DP_ERR(dev,
3264 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3265 cq->icid, qp->icid);
3266 wc_status = IB_WC_GENERAL_ERR;
3267 }
3268 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3269 wc_status, 1);
3270 }
3271 }
3272
3273 return cnt;
3274}
3275
3276static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3277 struct qedr_cq *cq, struct ib_wc *wc,
3278 struct rdma_cqe_responder *resp, u64 wr_id)
3279{
3280 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3281 u8 flags;
3282
3283 wc->opcode = IB_WC_RECV;
3284 wc->wc_flags = 0;
3285
3286 switch (resp->status) {
3287 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3288 wc_status = IB_WC_LOC_ACCESS_ERR;
3289 break;
3290 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3291 wc_status = IB_WC_LOC_LEN_ERR;
3292 break;
3293 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3294 wc_status = IB_WC_LOC_QP_OP_ERR;
3295 break;
3296 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3297 wc_status = IB_WC_LOC_PROT_ERR;
3298 break;
3299 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3300 wc_status = IB_WC_MW_BIND_ERR;
3301 break;
3302 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3303 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3304 break;
3305 case RDMA_CQE_RESP_STS_OK:
3306 wc_status = IB_WC_SUCCESS;
3307 wc->byte_len = le32_to_cpu(resp->length);
3308
3309 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3310
3311 if (flags == QEDR_RESP_RDMA_IMM)
3312 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3313
3314 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3315 wc->ex.imm_data =
3316 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3317 wc->wc_flags |= IB_WC_WITH_IMM;
3318 }
3319 break;
3320 default:
3321 wc->status = IB_WC_GENERAL_ERR;
3322 DP_ERR(dev, "Invalid CQE status detected\n");
3323 }
3324
3325 /* fill WC */
3326 wc->status = wc_status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003327 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003328 wc->src_qp = qp->id;
3329 wc->qp = &qp->ibqp;
3330 wc->wr_id = wr_id;
3331}
3332
3333static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3334 struct qedr_cq *cq, struct ib_wc *wc,
3335 struct rdma_cqe_responder *resp)
3336{
3337 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3338
3339 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3340
3341 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3342 qed_chain_consume(&qp->rq.pbl);
3343 qedr_inc_sw_cons(&qp->rq);
3344
3345 return 1;
3346}
3347
3348static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3349 int num_entries, struct ib_wc *wc, u16 hw_cons)
3350{
3351 u16 cnt = 0;
3352
3353 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3354 /* fill WC */
3355 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003356 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003357 wc->wc_flags = 0;
3358 wc->src_qp = qp->id;
3359 wc->byte_len = 0;
3360 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3361 wc->qp = &qp->ibqp;
3362 num_entries--;
3363 wc++;
3364 cnt++;
3365 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3366 qed_chain_consume(&qp->rq.pbl);
3367 qedr_inc_sw_cons(&qp->rq);
3368 }
3369
3370 return cnt;
3371}
3372
3373static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3374 struct rdma_cqe_responder *resp, int *update)
3375{
3376 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3377 consume_cqe(cq);
3378 *update |= 1;
3379 }
3380}
3381
3382static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3383 struct qedr_cq *cq, int num_entries,
3384 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3385 int *update)
3386{
3387 int cnt;
3388
3389 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3390 cnt = process_resp_flush(qp, cq, num_entries, wc,
3391 resp->rq_cons);
3392 try_consume_resp_cqe(cq, qp, resp, update);
3393 } else {
3394 cnt = process_resp_one(dev, qp, cq, wc, resp);
3395 consume_cqe(cq);
3396 *update |= 1;
3397 }
3398
3399 return cnt;
3400}
3401
3402static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3403 struct rdma_cqe_requester *req, int *update)
3404{
3405 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3406 consume_cqe(cq);
3407 *update |= 1;
3408 }
3409}
3410
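/* Poll the CQ: walk valid CQEs (toggle bit matches), dispatch requester and
 * responder completions to the helpers above, then advance cq_cons and ring
 * the CQ doorbell once if any CQE was consumed. GSI CQs are polled through
 * the dedicated GSI path.
 */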
3411int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3412{
3413 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3414 struct qedr_cq *cq = get_qedr_cq(ibcq);
3415 union rdma_cqe *cqe = cq->latest_cqe;
3416 u32 old_cons, new_cons;
3417 unsigned long flags;
3418 int update = 0;
3419 int done = 0;
3420
Ram Amrani04886772016-10-10 13:15:38 +03003421 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3422 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3423
Ram Amraniafa0e132016-10-10 13:15:36 +03003424 spin_lock_irqsave(&cq->cq_lock, flags);
3425 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3426 while (num_entries && is_valid_cqe(cq, cqe)) {
3427 struct qedr_qp *qp;
3428 int cnt = 0;
3429
3430 /* prevent speculative reads of any field of CQE */
3431 rmb();
3432
3433 qp = cqe_get_qp(cqe);
3434 if (!qp) {
3435 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3436 break;
3437 }
3438
3439 wc->qp = &qp->ibqp;
3440
3441 switch (cqe_get_type(cqe)) {
3442 case RDMA_CQE_TYPE_REQUESTER:
3443 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3444 &cqe->req);
3445 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3446 break;
3447 case RDMA_CQE_TYPE_RESPONDER_RQ:
3448 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3449 &cqe->resp, &update);
3450 break;
3451 case RDMA_CQE_TYPE_INVALID:
3452 default:
3453 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3454 cqe_get_type(cqe));
3455 }
3456 num_entries -= cnt;
3457 wc += cnt;
3458 done += cnt;
3459
3460 cqe = get_cqe(cq);
3461 }
3462 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3463
3464 cq->cq_cons += new_cons - old_cons;
3465
3466 if (update)
3467		/* the doorbell notifies about the latest VALID entry,
3468		 * but the chain already points to the next INVALID one
3469 */
3470 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3471
3472 spin_unlock_irqrestore(&cq->cq_lock, flags);
3473 return done;
3474}
Ram Amrani993d1b52016-10-10 13:15:39 +03003475
3476int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3477 u8 port_num,
3478 const struct ib_wc *in_wc,
3479 const struct ib_grh *in_grh,
3480 const struct ib_mad_hdr *mad_hdr,
3481 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3482 size_t *out_mad_size, u16 *out_mad_pkey_index)
3483{
3484 struct qedr_dev *dev = get_qedr_dev(ibdev);
3485
3486 DP_DEBUG(dev, QEDR_MSG_GSI,
3487 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3488 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3489 mad_hdr->class_specific, mad_hdr->class_version,
3490 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3491 return IB_MAD_RESULT_SUCCESS;
3492}
3493
3494int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3495 struct ib_port_immutable *immutable)
3496{
3497 struct ib_port_attr attr;
3498 int err;
3499
Or Gerlitzc4550c62017-01-24 13:02:39 +02003500 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3501 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3502
3503 err = ib_query_port(ibdev, port_num, &attr);
Ram Amrani993d1b52016-10-10 13:15:39 +03003504 if (err)
3505 return err;
3506
3507 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3508 immutable->gid_tbl_len = attr.gid_tbl_len;
Ram Amrani993d1b52016-10-10 13:15:39 +03003509 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3510
3511 return 0;
3512}