1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
46#include "qedr_hsi.h"
47#include <linux/qed/qed_if.h>
48#include "qedr.h"
49#include "verbs.h"
50#include <rdma/qedr-abi.h>
51#include "qedr_cm.h"
52
53#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
54
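/* RoCE exposes a single default P_Key: any index within the table
 * length simply returns QEDR_ROCE_PKEY_DEFAULT.
 */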
55int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
56{
57 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
58 return -EINVAL;
59
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
61 return 0;
62}
63
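/* Return the GID cached for this port/index. An entry that is not yet
 * populated (-EAGAIN) is reported as the zero GID rather than an error.
 */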
64int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
65 union ib_gid *sgid)
66{
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
68 int rc = 0;
69
70 if (!rdma_cap_roce_gid_table(ibdev, port))
71 return -ENODEV;
72
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
74 if (rc == -EAGAIN) {
75 memcpy(sgid, &zgid, sizeof(*sgid));
76 return 0;
77 }
78
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
81
82 return rc;
83}
84
85int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
88{
89 if (!rdma_cap_roce_gid_table(device, port_num))
90 return -EINVAL;
91
92 if (port_num > QEDR_MAX_PORT)
93 return -EINVAL;
94
95 if (!context)
96 return -EINVAL;
97
98 return 0;
99}
100
101int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
103{
104 if (!rdma_cap_roce_gid_table(device, port_num))
105 return -EINVAL;
106
107 if (port_num > QEDR_MAX_PORT)
108 return -EINVAL;
109
110 if (!context)
111 return -EINVAL;
112
113 return 0;
114}
115
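/* Report device capabilities by translating the limits obtained from the
 * qed core (dev->attr) into the generic struct ib_device_attr.
 */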
116int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
118{
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
121
122 if (!dev->rdma_ctx) {
123 DP_ERR(dev,
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
125 dev->rdma_ctx);
126 return -EINVAL;
127 }
128
129 memset(attr, 0, sizeof(*attr));
130
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
143
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
159
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
163
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
168
169 return 0;
170}
171
172#define QEDR_SPEED_SDR (1)
173#define QEDR_SPEED_DDR (2)
174#define QEDR_SPEED_QDR (4)
175#define QEDR_SPEED_FDR10 (8)
176#define QEDR_SPEED_FDR (16)
177#define QEDR_SPEED_EDR (32)
178
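/* Map the link speed reported by qed (in Mbps) onto the closest IB
 * speed/width pair; unknown speeds fall back to SDR x1.
 */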
179static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
180 u8 *ib_width)
181{
182 switch (speed) {
183 case 1000:
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
186 break;
187 case 10000:
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
190 break;
191
192 case 20000:
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
195 break;
196
197 case 25000:
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
200 break;
201
202 case 40000:
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
205 break;
206
207 case 50000:
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
210 break;
211
212 case 100000:
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
215 break;
216
217 default:
218 /* Unsupported */
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
221 }
222}
223
224int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
225{
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
228
229 dev = get_qedr_dev(ibdev);
230 if (port > 1) {
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
232 return -EINVAL;
233 }
234
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
237 return -EINVAL;
238 }
239
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241 memset(attr, 0, sizeof(*attr));
242
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
246 } else {
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
249 }
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
252 attr->lid = 0;
253 attr->lmc = 0;
254 attr->sm_lid = 0;
255 attr->sm_sl = 0;
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
265
266 return 0;
267}
268
269int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
271{
272 struct qedr_dev *dev;
273
274 dev = get_qedr_dev(ibdev);
275 if (port > 1) {
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
277 return -EINVAL;
278 }
279
280 return 0;
281}
282
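/* Doorbell and queue regions handed out to user space are tracked on a
 * per-ucontext list so that qedr_mmap() can later validate the requested
 * offset and length.
 */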
283static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
284 unsigned long len)
285{
286 struct qedr_mm *mm;
287
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
289 if (!mm)
290 return -ENOMEM;
291
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to PAGE_SIZE.
299 */
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
302
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
306
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
311
312 return 0;
313}
314
315static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
316 unsigned long len)
317{
318 bool found = false;
319 struct qedr_mm *mm;
320
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
324 continue;
325
326 found = true;
327 break;
328 }
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
333
334 return found;
335}
336
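/* Allocate a user context: reserve a DPI (doorbell page) from the qed
 * core, report the queue limits to the user library and register the
 * doorbell region so it can be mmap()ed.
 */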
337struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
339{
340 int rc;
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
345
346 if (!udata)
347 return ERR_PTR(-EFAULT);
348
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
350 if (!ctx)
351 return ERR_PTR(-ENOMEM);
352
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
354 if (rc) {
355 DP_ERR(dev,
356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
357 rc);
358 goto err;
359 }
360
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
367
368 memset(&uresp, 0, sizeof(uresp));
369
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
379
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
381 if (rc)
382 goto err;
383
384 ctx->dev = dev;
385
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387 if (rc)
388 goto err;
389
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
391 &ctx->ibucontext);
392 return &ctx->ibucontext;
393
394err:
395 kfree(ctx);
396 return ERR_PTR(rc);
397}
398
399int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
400{
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
403 int status = 0;
404
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
406 uctx);
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
408
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
414 kfree(mm);
415 }
416
417 kfree(uctx);
418 return status;
419}
420
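/* Only offsets previously registered via qedr_add_mmap() may be mapped.
 * Doorbell pages are mapped write-only and write-combined; any other
 * registered region is treated as a chain/queue mapping.
 */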
421int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
422{
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
428 int rc = 0;
429 bool found;
430
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
436 vma->vm_start);
437 return -EINVAL;
438 }
439
440 found = qedr_search_mmap(ucontext, vm_page, len);
441 if (!found) {
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
443 vma->vm_pgoff);
444 return -EINVAL;
445 }
446
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
448
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
450 dev->db_size))) {
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
454 return -EPERM;
455 }
456
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
458
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
461 } else {
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
465 }
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
467 return rc;
468}
469
470struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd;
477 u16 pd_id;
478 int rc;
479
480 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
481 (udata && context) ? "User Lib" : "Kernel");
482
483 if (!dev->rdma_ctx) {
484 DP_ERR(dev, "invalid RDMA context\n");
485 return ERR_PTR(-EINVAL);
486 }
487
488 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
489 if (!pd)
490 return ERR_PTR(-ENOMEM);
491
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id;
496
497 if (udata && context) {
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc)
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context);
502 uctx->pd = pd;
503 pd->uctx = uctx;
504 }
505
506 return &pd->ibpd;
507}
508
509int qedr_dealloc_pd(struct ib_pd *ibpd)
510{
511 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
512 struct qedr_pd *pd = get_qedr_pd(ibpd);
513
514 if (!pd)
515 pr_err("Invalid PD received in dealloc_pd\n");
516
517 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
518 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
519
520 kfree(pd);
521
522 return 0;
523}
524
525static void qedr_free_pbl(struct qedr_dev *dev,
526 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
527{
528 struct pci_dev *pdev = dev->pdev;
529 int i;
530
531 for (i = 0; i < pbl_info->num_pbls; i++) {
532 if (!pbl[i].va)
533 continue;
534 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
535 pbl[i].va, pbl[i].pa);
536 }
537
538 kfree(pbl);
539}
540
541#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
542#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
543
544#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
545#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
546#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
547
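/* Allocate the PBL pages themselves. For a two-layer table the first
 * page is filled with the physical addresses of the remaining pages.
 */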
548static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
549 struct qedr_pbl_info *pbl_info,
550 gfp_t flags)
551{
552 struct pci_dev *pdev = dev->pdev;
553 struct qedr_pbl *pbl_table;
554 dma_addr_t *pbl_main_tbl;
555 dma_addr_t pa;
556 void *va;
557 int i;
558
559 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
560 if (!pbl_table)
561 return ERR_PTR(-ENOMEM);
562
563 for (i = 0; i < pbl_info->num_pbls; i++) {
564 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
565 &pa, flags);
566 if (!va)
567 goto err;
568
569 memset(va, 0, pbl_info->pbl_size);
570 pbl_table[i].va = va;
571 pbl_table[i].pa = pa;
572 }
573
574 /* Two-layer PBLs: if we have more than one pbl we need to initialize
575 * the first one with physical pointers to all of the rest
576 */
577 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
578 for (i = 0; i < pbl_info->num_pbls - 1; i++)
579 pbl_main_tbl[i] = pbl_table[i + 1].pa;
580
581 return pbl_table;
582
583err:
584 for (i--; i >= 0; i--)
585 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
586 pbl_table[i].va, pbl_table[i].pa);
587
588 qedr_free_pbl(dev, pbl_info, pbl_table);
589
590 return ERR_PTR(-ENOMEM);
591}
592
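/* Determine how many PBL pages of which size are needed to describe
 * num_pbes page entries, switching to a two-layer layout when a single
 * page is not enough and the caller supports it.
 */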
593static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
594 struct qedr_pbl_info *pbl_info,
595 u32 num_pbes, int two_layer_capable)
596{
597 u32 pbl_capacity;
598 u32 pbl_size;
599 u32 num_pbls;
600
601 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
602 if (num_pbes > MAX_PBES_TWO_LAYER) {
603 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
604 num_pbes);
605 return -EINVAL;
606 }
607
608 /* calculate required pbl page size */
609 pbl_size = MIN_FW_PBL_PAGE_SIZE;
610 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
611 NUM_PBES_ON_PAGE(pbl_size);
612
613 while (pbl_capacity < num_pbes) {
614 pbl_size *= 2;
615 pbl_capacity = pbl_size / sizeof(u64);
616 pbl_capacity = pbl_capacity * pbl_capacity;
617 }
618
619 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
620 num_pbls++; /* One for layer 0 (points to the pbls) */
621 pbl_info->two_layered = true;
622 } else {
623 /* One layered PBL */
624 num_pbls = 1;
625 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
626 roundup_pow_of_two((num_pbes * sizeof(u64))));
627 pbl_info->two_layered = false;
628 }
629
630 pbl_info->num_pbls = num_pbls;
631 pbl_info->pbl_size = pbl_size;
632 pbl_info->num_pbes = num_pbes;
633
634 DP_DEBUG(dev, QEDR_MSG_MR,
635 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
636 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
637
638 return 0;
639}
640
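/* Walk the umem scatterlist and write the DMA address of every page into
 * the PBEs, advancing to the next PBL page whenever the current one is
 * full.
 */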
641static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
642 struct qedr_pbl *pbl,
643 struct qedr_pbl_info *pbl_info)
644{
645 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
646 struct qedr_pbl *pbl_tbl;
647 struct scatterlist *sg;
648 struct regpair *pbe;
649 int entry;
650 u32 addr;
651
652 if (!pbl_info->num_pbes)
653 return;
654
655 /* If we have a two-layered pbl, the first pbl points to the rest
656 * of the pbls and the first entry lies in the second pbl in the table
657 */
658 if (pbl_info->two_layered)
659 pbl_tbl = &pbl[1];
660 else
661 pbl_tbl = pbl;
662
663 pbe = (struct regpair *)pbl_tbl->va;
664 if (!pbe) {
665 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
666 return;
667 }
668
669 pbe_cnt = 0;
670
671 shift = ilog2(umem->page_size);
672
673 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
674 pages = sg_dma_len(sg) >> shift;
675 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
676 /* store the page address in pbe */
677 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
678 umem->page_size * pg_cnt);
679 addr = upper_32_bits(sg_dma_address(sg) +
680 umem->page_size * pg_cnt);
681 pbe->hi = cpu_to_le32(addr);
682 pbe_cnt++;
683 total_num_pbes++;
684 pbe++;
685
686 if (total_num_pbes == pbl_info->num_pbes)
687 return;
688
689 /* If the given pbl is full storing the pbes,
690 * move to next pbl.
691 */
692 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
693 pbl_tbl++;
694 pbe = (struct regpair *)pbl_tbl->va;
695 pbe_cnt = 0;
696 }
697 }
698 }
699}
700
701static int qedr_copy_cq_uresp(struct qedr_dev *dev,
702 struct qedr_cq *cq, struct ib_udata *udata)
703{
704 struct qedr_create_cq_uresp uresp;
705 int rc;
706
707 memset(&uresp, 0, sizeof(uresp));
708
709 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
710 uresp.icid = cq->icid;
711
712 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
713 if (rc)
714 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
715
716 return rc;
717}
718
719static void consume_cqe(struct qedr_cq *cq)
720{
721 if (cq->latest_cqe == cq->toggle_cqe)
722 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
723
724 cq->latest_cqe = qed_chain_consume(&cq->pbl);
725}
726
727static inline int qedr_align_cq_entries(int entries)
728{
729 u64 size, aligned_size;
730
731 /* We allocate an extra entry that we don't report to the FW. */
732 size = (entries + 1) * QEDR_CQE_SIZE;
733 aligned_size = ALIGN(size, PAGE_SIZE);
734
735 return aligned_size / QEDR_CQE_SIZE;
736}
737
738static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
739 struct qedr_dev *dev,
740 struct qedr_userq *q,
741 u64 buf_addr, size_t buf_len,
742 int access, int dmasync)
743{
744 int page_cnt;
745 int rc;
746
747 q->buf_addr = buf_addr;
748 q->buf_len = buf_len;
749 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
750 if (IS_ERR(q->umem)) {
751 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
752 PTR_ERR(q->umem));
753 return PTR_ERR(q->umem);
754 }
755
756 page_cnt = ib_umem_page_count(q->umem);
757 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
758 if (rc)
759 goto err0;
760
761 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
762 if (IS_ERR_OR_NULL(q->pbl_tbl))
763 goto err0;
764
765 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
766
767 return 0;
768
769err0:
770 ib_umem_release(q->umem);
771
772 return rc;
773}
774
775static inline void qedr_init_cq_params(struct qedr_cq *cq,
776 struct qedr_ucontext *ctx,
777 struct qedr_dev *dev, int vector,
778 int chain_entries, int page_cnt,
779 u64 pbl_ptr,
780 struct qed_rdma_create_cq_in_params
781 *params)
782{
783 memset(params, 0, sizeof(*params));
784 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
785 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
786 params->cnq_id = vector;
787 params->cq_size = chain_entries - 1;
788 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
789 params->pbl_num_pages = page_cnt;
790 params->pbl_ptr = pbl_ptr;
791 params->pbl_two_level = 0;
792}
793
794static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
795{
796 /* Flush data before signalling doorbell */
797 wmb();
798 cq->db.data.agg_flags = flags;
799 cq->db.data.value = cpu_to_le32(cons);
800 writeq(cq->db.raw, cq->db_addr);
801
802 /* Make sure write would stick */
803 mmiowb();
804}
805
806int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
807{
808 struct qedr_cq *cq = get_qedr_cq(ibcq);
809 unsigned long sflags;
810
811 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
812 return 0;
813
814 spin_lock_irqsave(&cq->cq_lock, sflags);
815
816 cq->arm_flags = 0;
817
818 if (flags & IB_CQ_SOLICITED)
819 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
820
821 if (flags & IB_CQ_NEXT_COMP)
822 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
823
824 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
825
826 spin_unlock_irqrestore(&cq->cq_lock, sflags);
827
828 return 0;
829}
830
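/* Create a CQ. User CQs are built from a user-supplied buffer that is
 * pinned and described by a PBL; kernel CQs use a qed chain. In both
 * cases the PBL is passed to the firmware through rdma_create_cq.
 */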
831struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
832 const struct ib_cq_init_attr *attr,
833 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
834{
835 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
836 struct qed_rdma_destroy_cq_out_params destroy_oparams;
837 struct qed_rdma_destroy_cq_in_params destroy_iparams;
838 struct qedr_dev *dev = get_qedr_dev(ibdev);
839 struct qed_rdma_create_cq_in_params params;
840 struct qedr_create_cq_ureq ureq;
841 int vector = attr->comp_vector;
842 int entries = attr->cqe;
843 struct qedr_cq *cq;
844 int chain_entries;
845 int page_cnt;
846 u64 pbl_ptr;
847 u16 icid;
848 int rc;
849
850 DP_DEBUG(dev, QEDR_MSG_INIT,
851 "create_cq: called from %s. entries=%d, vector=%d\n",
852 udata ? "User Lib" : "Kernel", entries, vector);
853
854 if (entries > QEDR_MAX_CQES) {
855 DP_ERR(dev,
856 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
857 entries, QEDR_MAX_CQES);
858 return ERR_PTR(-EINVAL);
859 }
860
861 chain_entries = qedr_align_cq_entries(entries);
862 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
863
864 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
865 if (!cq)
866 return ERR_PTR(-ENOMEM);
867
868 if (udata) {
869 memset(&ureq, 0, sizeof(ureq));
870 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
871 DP_ERR(dev,
872 "create cq: problem copying data from user space\n");
873 goto err0;
874 }
875
876 if (!ureq.len) {
877 DP_ERR(dev,
878 "create cq: cannot create a cq with 0 entries\n");
879 goto err0;
880 }
881
882 cq->cq_type = QEDR_CQ_TYPE_USER;
883
884 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
885 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
886 if (rc)
887 goto err0;
888
889 pbl_ptr = cq->q.pbl_tbl->pa;
890 page_cnt = cq->q.pbl_info.num_pbes;
891 } else {
892 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
893
894 rc = dev->ops->common->chain_alloc(dev->cdev,
895 QED_CHAIN_USE_TO_CONSUME,
896 QED_CHAIN_MODE_PBL,
897 QED_CHAIN_CNT_TYPE_U32,
898 chain_entries,
899 sizeof(union rdma_cqe),
900 &cq->pbl);
901 if (rc)
902 goto err1;
903
904 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
905 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
906 }
907
908 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
909 pbl_ptr, &params);
910
911 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
912 if (rc)
913 goto err2;
914
915 cq->icid = icid;
916 cq->sig = QEDR_CQ_MAGIC_NUMBER;
917 spin_lock_init(&cq->cq_lock);
918
919 if (ib_ctx) {
920 rc = qedr_copy_cq_uresp(dev, cq, udata);
921 if (rc)
922 goto err3;
923 } else {
924 /* Generate doorbell address. */
925 cq->db_addr = dev->db_addr +
926 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
927 cq->db.data.icid = cq->icid;
928 cq->db.data.params = DB_AGG_CMD_SET <<
929 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
930
931 /* point to the very last element, passing it we will toggle */
932 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
933 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
934 cq->latest_cqe = NULL;
935 consume_cqe(cq);
936 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
937 }
938
939 DP_DEBUG(dev, QEDR_MSG_CQ,
940 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
941 cq->icid, cq, params.cq_size);
942
943 return &cq->ibcq;
944
945err3:
946 destroy_iparams.icid = cq->icid;
947 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
948 &destroy_oparams);
949err2:
950 if (udata)
951 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
952 else
953 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
954err1:
955 if (udata)
956 ib_umem_release(cq->q.umem);
957err0:
958 kfree(cq);
959 return ERR_PTR(-EINVAL);
960}
961
962int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
963{
964 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
965 struct qedr_cq *cq = get_qedr_cq(ibcq);
966
967 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
968
969 return 0;
970}
971
972int qedr_destroy_cq(struct ib_cq *ibcq)
973{
974 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
975 struct qed_rdma_destroy_cq_out_params oparams;
976 struct qed_rdma_destroy_cq_in_params iparams;
977 struct qedr_cq *cq = get_qedr_cq(ibcq);
978
979 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
980
981 /* GSI CQs are handled by the driver, so they don't exist in the FW */
982 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
983 iparams.icid = cq->icid;
984 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
985 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
986 }
987
988 if (ibcq->uobject && ibcq->uobject->context) {
989 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
990 ib_umem_release(cq->q.umem);
991 }
992
993 kfree(cq);
994
995 return 0;
996}
997
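/* Resolve the source GID referenced by the AH attributes into the qed
 * modify-QP parameters: select RoCE v1/v2 (IPv4 or IPv6) from the GID
 * type and take the VLAN id from the GID's net device.
 */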
998static inline int get_gid_info_from_table(struct ib_qp *ibqp,
999 struct ib_qp_attr *attr,
1000 int attr_mask,
1001 struct qed_rdma_modify_qp_in_params
1002 *qp_params)
1003{
1004 enum rdma_network_type nw_type;
1005 struct ib_gid_attr gid_attr;
1006 union ib_gid gid;
1007 u32 ipv4_addr;
1008 int rc = 0;
1009 int i;
1010
1011 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1012 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1013 if (rc)
1014 return rc;
1015
1016 if (!memcmp(&gid, &zgid, sizeof(gid)))
1017 return -ENOENT;
1018
1019 if (gid_attr.ndev) {
1020 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1021
1022 dev_put(gid_attr.ndev);
1023 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1024 switch (nw_type) {
1025 case RDMA_NETWORK_IPV6:
1026 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1027 sizeof(qp_params->sgid));
1028 memcpy(&qp_params->dgid.bytes[0],
1029 &attr->ah_attr.grh.dgid,
1030 sizeof(qp_params->dgid));
1031 qp_params->roce_mode = ROCE_V2_IPV6;
1032 SET_FIELD(qp_params->modify_flags,
1033 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1034 break;
1035 case RDMA_NETWORK_IB:
1036 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1037 sizeof(qp_params->sgid));
1038 memcpy(&qp_params->dgid.bytes[0],
1039 &attr->ah_attr.grh.dgid,
1040 sizeof(qp_params->dgid));
1041 qp_params->roce_mode = ROCE_V1;
1042 break;
1043 case RDMA_NETWORK_IPV4:
1044 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1045 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1046 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1047 qp_params->sgid.ipv4_addr = ipv4_addr;
1048 ipv4_addr =
1049 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1050 qp_params->dgid.ipv4_addr = ipv4_addr;
1051 SET_FIELD(qp_params->modify_flags,
1052 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1053 qp_params->roce_mode = ROCE_V2_IPV4;
1054 break;
1055 }
1056 }
1057
1058 for (i = 0; i < 4; i++) {
1059 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1060 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1061 }
1062
1063 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1064 qp_params->vlan_id = 0;
1065
1066 return 0;
1067}
1068
1069static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1070{
1071 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1072 ib_umem_release(qp->usq.umem);
1073}
1074
1075static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1076{
1077 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1078 ib_umem_release(qp->urq.umem);
1079}
1080
1081static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1082{
1083 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1084 kfree(qp->wqe_wr_id);
1085}
1086
1087static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1088{
1089 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1090 kfree(qp->rqe_wr_id);
1091}
1092
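/* Validate the create-QP attributes against the device limits. Only RC
 * and GSI QPs are supported, and user space may not create the special
 * GSI QP.
 */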
1093static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1094 struct ib_qp_init_attr *attrs)
1095{
1096 struct qedr_device_attr *qattr = &dev->attr;
1097
1098 /* QP0... attrs->qp_type == IB_QPT_GSI */
1099 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1100 DP_DEBUG(dev, QEDR_MSG_QP,
1101 "create qp: unsupported qp type=0x%x requested\n",
1102 attrs->qp_type);
1103 return -EINVAL;
1104 }
1105
1106 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1107 DP_ERR(dev,
1108 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1109 attrs->cap.max_send_wr, qattr->max_sqe);
1110 return -EINVAL;
1111 }
1112
1113 if (attrs->cap.max_inline_data > qattr->max_inline) {
1114 DP_ERR(dev,
1115 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1116 attrs->cap.max_inline_data, qattr->max_inline);
1117 return -EINVAL;
1118 }
1119
1120 if (attrs->cap.max_send_sge > qattr->max_sge) {
1121 DP_ERR(dev,
1122 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1123 attrs->cap.max_send_sge, qattr->max_sge);
1124 return -EINVAL;
1125 }
1126
1127 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1128 DP_ERR(dev,
1129 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1130 attrs->cap.max_recv_sge, qattr->max_sge);
1131 return -EINVAL;
1132 }
1133
1134 /* Unprivileged user space cannot create special QP */
1135 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1136 DP_ERR(dev,
1137 "create qp: userspace can't create special QPs of type=0x%x\n",
1138 attrs->qp_type);
1139 return -EINVAL;
1140 }
1141
1142 return 0;
1143}
1144
1145static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1146 struct qedr_qp *qp)
1147{
1148 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1149 uresp->rq_icid = qp->icid;
1150}
1151
1152static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1153 struct qedr_qp *qp)
1154{
1155 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1156 uresp->sq_icid = qp->icid + 1;
1157}
1158
1159static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1160 struct qedr_qp *qp, struct ib_udata *udata)
1161{
1162 struct qedr_create_qp_uresp uresp;
1163 int rc;
1164
1165 memset(&uresp, 0, sizeof(uresp));
1166 qedr_copy_sq_uresp(&uresp, qp);
1167 qedr_copy_rq_uresp(&uresp, qp);
1168
1169 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1170 uresp.qp_id = qp->qp_id;
1171
1172 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1173 if (rc)
1174 DP_ERR(dev,
1175 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1176 qp->icid);
1177
1178 return rc;
1179}
1180
1181static void qedr_set_qp_init_params(struct qedr_dev *dev,
1182 struct qedr_qp *qp,
1183 struct qedr_pd *pd,
1184 struct ib_qp_init_attr *attrs)
1185{
1186 qp->pd = pd;
1187
1188 spin_lock_init(&qp->q_lock);
1189
1190 qp->qp_type = attrs->qp_type;
1191 qp->max_inline_data = attrs->cap.max_inline_data;
1192 qp->sq.max_sges = attrs->cap.max_send_sge;
1193 qp->state = QED_ROCE_QP_STATE_RESET;
1194 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1195 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1196 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1197 qp->dev = dev;
1198
1199 DP_DEBUG(dev, QEDR_MSG_QP,
1200 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1201 pd->pd_id, qp->qp_type, qp->max_inline_data,
1202 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1203 DP_DEBUG(dev, QEDR_MSG_QP,
1204 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1205 qp->sq.max_sges, qp->sq_cq->icid);
1206 qp->rq.max_sges = attrs->cap.max_recv_sge;
1207 DP_DEBUG(dev, QEDR_MSG_QP,
1208 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1209 qp->rq.max_sges, qp->rq_cq->icid);
1210}
1211
1212static inline void
1213qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1214 struct qedr_create_qp_ureq *ureq)
1215{
1216 /* QP handle to be written in CQE */
1217 params->qp_handle_lo = ureq->qp_handle_lo;
1218 params->qp_handle_hi = ureq->qp_handle_hi;
1219}
1220
1221static inline void
1222qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1223{
1224 qp->sq.db = dev->db_addr +
1225 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1226 qp->sq.db_data.data.icid = qp->icid + 1;
1227}
1228
1229static inline void
1230qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1231{
1232 qp->rq.db = dev->db_addr +
1233 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1234 qp->rq.db_data.data.icid = qp->icid;
1235}
1236
1237static inline int
1238qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1239 struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1240{
1241 /* Allocate driver internal RQ array */
1242 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1243 GFP_KERNEL);
1244 if (!qp->rqe_wr_id)
1245 return -ENOMEM;
1246
1247 DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1248
1249 return 0;
1250}
1251
1252static inline int
1253qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1254 struct qedr_qp *qp,
1255 struct ib_qp_init_attr *attrs,
1256 struct qed_rdma_create_qp_in_params *params)
1257{
1258 u32 temp_max_wr;
1259
1260 /* Allocate driver internal SQ array */
1261 temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1262 temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1263
1264 /* temp_max_wr is bounded by dev->attr.max_sqe, which fits in a u16, so the cast is safe */
1265 qp->sq.max_wr = (u16)temp_max_wr;
1266 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1267 GFP_KERNEL);
1268 if (!qp->wqe_wr_id)
1269 return -ENOMEM;
1270
1271 DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1272
1273 /* QP handle to be written in CQE */
1274 params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1275 params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1276
1277 return 0;
1278}
1279
1280static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1281 struct qedr_qp *qp,
1282 struct ib_qp_init_attr *attrs)
1283{
1284 u32 n_sq_elems, n_sq_entries;
1285 int rc;
1286
1287 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1288 * the ring. The ring should allow at least a single WR, even if the
1289 * user requested none, due to allocation issues.
1290 */
1291 n_sq_entries = attrs->cap.max_send_wr;
1292 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1293 n_sq_entries = max_t(u32, n_sq_entries, 1);
1294 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1295 rc = dev->ops->common->chain_alloc(dev->cdev,
1296 QED_CHAIN_USE_TO_PRODUCE,
1297 QED_CHAIN_MODE_PBL,
1298 QED_CHAIN_CNT_TYPE_U32,
1299 n_sq_elems,
1300 QEDR_SQE_ELEMENT_SIZE,
1301 &qp->sq.pbl);
1302 if (rc) {
1303 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1304 return rc;
1305 }
1306
1307 DP_DEBUG(dev, QEDR_MSG_SQ,
1308 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1309 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1310 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1311 return 0;
1312}
1313
1314static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1315 struct qedr_qp *qp,
1316 struct ib_qp_init_attr *attrs)
1317{
1318 u32 n_rq_elems, n_rq_entries;
1319 int rc;
1320
1321 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1322 * the ring. The ring should allow at least a single WR, even if the
1323 * user requested none, due to allocation issues.
1324 */
1325 n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1326 n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1327 rc = dev->ops->common->chain_alloc(dev->cdev,
1328 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1329 QED_CHAIN_MODE_PBL,
1330 QED_CHAIN_CNT_TYPE_U32,
1331 n_rq_elems,
1332 QEDR_RQE_ELEMENT_SIZE,
1333 &qp->rq.pbl);
1334
1335 if (rc) {
1336 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1337 return -ENOMEM;
1338 }
1339
1340 DP_DEBUG(dev, QEDR_MSG_RQ,
1341 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1342 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1343 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1344
1345 /* n_rq_entries fits in a u16, so the cast is safe */
1346 qp->rq.max_wr = (u16)n_rq_entries;
1347
1348 return 0;
1349}
1350
1351static inline void
1352qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1353 struct qedr_pd *pd,
1354 struct qedr_qp *qp,
1355 struct ib_qp_init_attr *attrs,
1356 struct ib_udata *udata,
1357 struct qed_rdma_create_qp_in_params *params)
1358{
1359 /* QP handle to be written in an async event */
1360 params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1361 params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1362
1363 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1364 params->fmr_and_reserved_lkey = !udata;
1365 params->pd = pd->pd_id;
1366 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1367 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1368 params->max_sq_sges = 0;
1369 params->stats_queue = 0;
1370
1371 if (udata) {
1372 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1373 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1374 } else {
1375 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1376 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1377 }
1378}
1379
1380static inline void
1381qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1382 struct ib_qp_init_attr *attrs,
1383 struct ib_udata *udata,
1384 struct qed_rdma_create_qp_in_params *params)
1385{
1386 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1387 params->srq_id = 0;
1388 params->use_srq = false;
1389
1390 if (udata) {
1391 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1392 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1393 } else {
1394 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1395 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1396 }
1397}
1398
1399static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1400{
1401 DP_DEBUG(dev, QEDR_MSG_QP,
1402 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1403 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1404 qp->urq.buf_len);
1405}
1406
1407static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1408 struct qedr_dev *dev,
1409 struct qedr_qp *qp,
1410 struct qedr_create_qp_ureq *ureq)
1411{
1412 int rc;
1413
1414 /* SQ - read access only (0), dma sync not required (0) */
1415 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1416 ureq->sq_len, 0, 0);
1417 if (rc)
1418 return rc;
1419
1420 /* RQ - read access only (0), dma sync not required (0) */
1421 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1422 ureq->rq_len, 0, 0);
1423
1424 if (rc)
1425 qedr_cleanup_user_sq(dev, qp);
1426 return rc;
1427}
1428
1429static inline int
1430qedr_init_kernel_qp(struct qedr_dev *dev,
1431 struct qedr_qp *qp,
1432 struct ib_qp_init_attr *attrs,
1433 struct qed_rdma_create_qp_in_params *params)
1434{
1435 int rc;
1436
1437 rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1438 if (rc) {
1439 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1440 return rc;
1441 }
1442
1443 rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1444 if (rc) {
1445 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1446 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1447 return rc;
1448 }
1449
1450 rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1451 if (rc) {
1452 qedr_cleanup_kernel_sq(dev, qp);
1453 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1454 return rc;
1455 }
1456
1457 rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1458 if (rc) {
1459 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1460 qedr_cleanup_kernel_sq(dev, qp);
1461 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1462 return rc;
1463 }
1464
1465 return rc;
1466}
1467
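/* Create a QP. GSI QPs are handled entirely inside the driver via
 * qedr_create_gsi_qp(); otherwise the SQ/RQ are built either from user
 * buffers or from kernel qed chains and the QP is created in the
 * firmware.
 */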
1468struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1469 struct ib_qp_init_attr *attrs,
1470 struct ib_udata *udata)
1471{
1472 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1473 struct qed_rdma_create_qp_out_params out_params;
1474 struct qed_rdma_create_qp_in_params in_params;
1475 struct qedr_pd *pd = get_qedr_pd(ibpd);
1476 struct ib_ucontext *ib_ctx = NULL;
1477 struct qedr_ucontext *ctx = NULL;
1478 struct qedr_create_qp_ureq ureq;
1479 struct qedr_qp *qp;
1480 int rc = 0;
1481
1482 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1483 udata ? "user library" : "kernel", pd);
1484
1485 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1486 if (rc)
1487 return ERR_PTR(rc);
1488
1489 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1490 if (!qp)
1491 return ERR_PTR(-ENOMEM);
1492
1493 if (attrs->srq)
1494 return ERR_PTR(-EINVAL);
1495
1496 DP_DEBUG(dev, QEDR_MSG_QP,
1497 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1498 get_qedr_cq(attrs->send_cq),
1499 get_qedr_cq(attrs->send_cq)->icid,
1500 get_qedr_cq(attrs->recv_cq),
1501 get_qedr_cq(attrs->recv_cq)->icid);
1502
1503 qedr_set_qp_init_params(dev, qp, pd, attrs);
1504
1505 if (attrs->qp_type == IB_QPT_GSI) {
1506 if (udata) {
1507 DP_ERR(dev,
1508 "create qp: unexpected udata when creating GSI QP\n");
1509 goto err0;
1510 }
1511 return qedr_create_gsi_qp(dev, attrs, qp);
1512 }
1513
1514 memset(&in_params, 0, sizeof(in_params));
1515
1516 if (udata) {
1517 if (!(udata && ibpd->uobject && ibpd->uobject->context))
1518 goto err0;
1519
1520 ib_ctx = ibpd->uobject->context;
1521 ctx = get_qedr_ucontext(ib_ctx);
1522
1523 memset(&ureq, 0, sizeof(ureq));
1524 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1525 DP_ERR(dev,
1526 "create qp: problem copying data from user space\n");
1527 goto err0;
1528 }
1529
1530 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1531 if (rc)
1532 goto err0;
1533
1534 qedr_init_qp_user_params(&in_params, &ureq);
1535 } else {
1536 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1537 if (rc)
1538 goto err0;
1539 }
1540
1541 qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1542 qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1543
1544 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1545 &in_params, &out_params);
1546
1547 if (!qp->qed_qp)
1548 goto err1;
1549
1550 qp->qp_id = out_params.qp_id;
1551 qp->icid = out_params.icid;
1552 qp->ibqp.qp_num = qp->qp_id;
1553
1554 if (udata) {
1555 rc = qedr_copy_qp_uresp(dev, qp, udata);
1556 if (rc)
1557 goto err2;
1558
1559 qedr_qp_user_print(dev, qp);
1560 } else {
1561 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1562 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1563 }
1564
1565 DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1566 udata ? "user" : "kernel", qp);
1567
1568 return &qp->ibqp;
1569
1570err2:
1571 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1572 if (rc)
1573 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1574err1:
1575 if (udata) {
1576 qedr_cleanup_user_sq(dev, qp);
1577 qedr_cleanup_user_rq(dev, qp);
1578 } else {
1579 qedr_cleanup_kernel_sq(dev, qp);
1580 qedr_cleanup_kernel_rq(dev, qp);
1581 }
1582
1583err0:
1584 kfree(qp);
1585
1586 return ERR_PTR(-EFAULT);
1587}
1588
1589enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1590{
1591 switch (qp_state) {
1592 case QED_ROCE_QP_STATE_RESET:
1593 return IB_QPS_RESET;
1594 case QED_ROCE_QP_STATE_INIT:
1595 return IB_QPS_INIT;
1596 case QED_ROCE_QP_STATE_RTR:
1597 return IB_QPS_RTR;
1598 case QED_ROCE_QP_STATE_RTS:
1599 return IB_QPS_RTS;
1600 case QED_ROCE_QP_STATE_SQD:
1601 return IB_QPS_SQD;
1602 case QED_ROCE_QP_STATE_ERR:
1603 return IB_QPS_ERR;
1604 case QED_ROCE_QP_STATE_SQE:
1605 return IB_QPS_SQE;
1606 }
1607 return IB_QPS_ERR;
1608}
1609
1610enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
1611{
1612 switch (qp_state) {
1613 case IB_QPS_RESET:
1614 return QED_ROCE_QP_STATE_RESET;
1615 case IB_QPS_INIT:
1616 return QED_ROCE_QP_STATE_INIT;
1617 case IB_QPS_RTR:
1618 return QED_ROCE_QP_STATE_RTR;
1619 case IB_QPS_RTS:
1620 return QED_ROCE_QP_STATE_RTS;
1621 case IB_QPS_SQD:
1622 return QED_ROCE_QP_STATE_SQD;
1623 case IB_QPS_ERR:
1624 return QED_ROCE_QP_STATE_ERR;
1625 default:
1626 return QED_ROCE_QP_STATE_ERR;
1627 }
1628}
1629
1630static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1631{
1632 qed_chain_reset(&qph->pbl);
1633 qph->prod = 0;
1634 qph->cons = 0;
1635 qph->wqe_cons = 0;
1636 qph->db_data.data.value = cpu_to_le16(0);
1637}
1638
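/* Enforce the legal QP state transitions. Moving from INIT to RTR also
 * rings the RQ doorbell in case receives were posted before the
 * transition.
 */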
1639static int qedr_update_qp_state(struct qedr_dev *dev,
1640 struct qedr_qp *qp,
1641 enum qed_roce_qp_state new_state)
1642{
1643 int status = 0;
1644
1645 if (new_state == qp->state)
1646 return 1;
1647
1648 switch (qp->state) {
1649 case QED_ROCE_QP_STATE_RESET:
1650 switch (new_state) {
1651 case QED_ROCE_QP_STATE_INIT:
1652 qp->prev_wqe_size = 0;
1653 qedr_reset_qp_hwq_info(&qp->sq);
1654 qedr_reset_qp_hwq_info(&qp->rq);
1655 break;
1656 default:
1657 status = -EINVAL;
1658 break;
1659 };
1660 break;
1661 case QED_ROCE_QP_STATE_INIT:
1662 switch (new_state) {
1663 case QED_ROCE_QP_STATE_RTR:
1664 /* Update doorbell (in case post_recv was
1665 * done before move to RTR)
1666 */
1667 wmb();
1668 writel(qp->rq.db_data.raw, qp->rq.db);
1669 /* Make sure write takes effect */
1670 mmiowb();
1671 break;
1672 case QED_ROCE_QP_STATE_ERR:
1673 break;
1674 default:
1675 /* Invalid state change. */
1676 status = -EINVAL;
1677 break;
1678 };
1679 break;
1680 case QED_ROCE_QP_STATE_RTR:
1681 /* RTR->XXX */
1682 switch (new_state) {
1683 case QED_ROCE_QP_STATE_RTS:
1684 break;
1685 case QED_ROCE_QP_STATE_ERR:
1686 break;
1687 default:
1688 /* Invalid state change. */
1689 status = -EINVAL;
1690 break;
1691 };
1692 break;
1693 case QED_ROCE_QP_STATE_RTS:
1694 /* RTS->XXX */
1695 switch (new_state) {
1696 case QED_ROCE_QP_STATE_SQD:
1697 break;
1698 case QED_ROCE_QP_STATE_ERR:
1699 break;
1700 default:
1701 /* Invalid state change. */
1702 status = -EINVAL;
1703 break;
1704 };
1705 break;
1706 case QED_ROCE_QP_STATE_SQD:
1707 /* SQD->XXX */
1708 switch (new_state) {
1709 case QED_ROCE_QP_STATE_RTS:
1710 case QED_ROCE_QP_STATE_ERR:
1711 break;
1712 default:
1713 /* Invalid state change. */
1714 status = -EINVAL;
1715 break;
1716 };
1717 break;
1718 case QED_ROCE_QP_STATE_ERR:
1719 /* ERR->XXX */
1720 switch (new_state) {
1721 case QED_ROCE_QP_STATE_RESET:
1722 break;
1723 default:
1724 status = -EINVAL;
1725 break;
1726 };
1727 break;
1728 default:
1729 status = -EINVAL;
1730 break;
1731 };
1732
1733 return status;
1734}
1735
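/* Translate the IB attribute mask into qed modify-QP parameters. The MTU
 * is clamped to the netdev MTU and the IB ack timeout (4.096us * 2^n) is
 * converted to the milliseconds expected by the firmware.
 */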
1736int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1737 int attr_mask, struct ib_udata *udata)
1738{
1739 struct qedr_qp *qp = get_qedr_qp(ibqp);
1740 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1741 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1742 enum ib_qp_state old_qp_state, new_qp_state;
1743 int rc = 0;
1744
1745 DP_DEBUG(dev, QEDR_MSG_QP,
1746 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1747 attr->qp_state);
1748
1749 old_qp_state = qedr_get_ibqp_state(qp->state);
1750 if (attr_mask & IB_QP_STATE)
1751 new_qp_state = attr->qp_state;
1752 else
1753 new_qp_state = old_qp_state;
1754
1755 if (!ib_modify_qp_is_ok
1756 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1757 IB_LINK_LAYER_ETHERNET)) {
1758 DP_ERR(dev,
1759 "modify qp: invalid attribute mask=0x%x specified for\n"
1760 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1761 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1762 new_qp_state);
1763 rc = -EINVAL;
1764 goto err;
1765 }
1766
1767 /* Translate the masks... */
1768 if (attr_mask & IB_QP_STATE) {
1769 SET_FIELD(qp_params.modify_flags,
1770 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1771 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1772 }
1773
1774 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1775 qp_params.sqd_async = true;
1776
1777 if (attr_mask & IB_QP_PKEY_INDEX) {
1778 SET_FIELD(qp_params.modify_flags,
1779 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1780 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1781 rc = -EINVAL;
1782 goto err;
1783 }
1784
1785 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1786 }
1787
1788 if (attr_mask & IB_QP_QKEY)
1789 qp->qkey = attr->qkey;
1790
1791 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1792 SET_FIELD(qp_params.modify_flags,
1793 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1794 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1795 IB_ACCESS_REMOTE_READ;
1796 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1797 IB_ACCESS_REMOTE_WRITE;
1798 qp_params.incoming_atomic_en = attr->qp_access_flags &
1799 IB_ACCESS_REMOTE_ATOMIC;
1800 }
1801
1802 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1803 if (attr_mask & IB_QP_PATH_MTU) {
1804 if (attr->path_mtu < IB_MTU_256 ||
1805 attr->path_mtu > IB_MTU_4096) {
1806 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1807 rc = -EINVAL;
1808 goto err;
1809 }
1810 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1811 ib_mtu_enum_to_int(iboe_get_mtu
1812 (dev->ndev->mtu)));
1813 }
1814
1815 if (!qp->mtu) {
1816 qp->mtu =
1817 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1818 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1819 }
1820
1821 SET_FIELD(qp_params.modify_flags,
1822 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1823
1824 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1825 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1826 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1827
1828 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1829
1830 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1831 if (rc) {
1832 DP_ERR(dev,
1833 "modify qp: problems with GID index %d (rc=%d)\n",
1834 attr->ah_attr.grh.sgid_index, rc);
1835 return rc;
1836 }
1837
1838 rc = qedr_get_dmac(dev, &attr->ah_attr,
1839 qp_params.remote_mac_addr);
1840 if (rc)
1841 return rc;
1842
1843 qp_params.use_local_mac = true;
1844 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1845
1846 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1847 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1848 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1849 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1850 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1851 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1852 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1853 qp_params.remote_mac_addr);
1854
1855
1856 qp_params.mtu = qp->mtu;
1857 qp_params.lb_indication = false;
1858 }
1859
1860 if (!qp_params.mtu) {
1861 /* Stay with current MTU */
1862 if (qp->mtu)
1863 qp_params.mtu = qp->mtu;
1864 else
1865 qp_params.mtu =
1866 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1867 }
1868
1869 if (attr_mask & IB_QP_TIMEOUT) {
1870 SET_FIELD(qp_params.modify_flags,
1871 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1872
1873 qp_params.ack_timeout = attr->timeout;
1874 if (attr->timeout) {
1875 u32 temp;
1876
1877 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1878 /* FW requires [msec] */
1879 qp_params.ack_timeout = temp;
1880 } else {
1881 /* Infinite */
1882 qp_params.ack_timeout = 0;
1883 }
1884 }
1885 if (attr_mask & IB_QP_RETRY_CNT) {
1886 SET_FIELD(qp_params.modify_flags,
1887 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1888 qp_params.retry_cnt = attr->retry_cnt;
1889 }
1890
1891 if (attr_mask & IB_QP_RNR_RETRY) {
1892 SET_FIELD(qp_params.modify_flags,
1893 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1894 qp_params.rnr_retry_cnt = attr->rnr_retry;
1895 }
1896
1897 if (attr_mask & IB_QP_RQ_PSN) {
1898 SET_FIELD(qp_params.modify_flags,
1899 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1900 qp_params.rq_psn = attr->rq_psn;
1901 qp->rq_psn = attr->rq_psn;
1902 }
1903
1904 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1905 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1906 rc = -EINVAL;
1907 DP_ERR(dev,
1908 "unsupported max_rd_atomic=%d, supported=%d\n",
1909 attr->max_rd_atomic,
1910 dev->attr.max_qp_req_rd_atomic_resc);
1911 goto err;
1912 }
1913
1914 SET_FIELD(qp_params.modify_flags,
1915 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1916 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1917 }
1918
1919 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1920 SET_FIELD(qp_params.modify_flags,
1921 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1922 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1923 }
1924
1925 if (attr_mask & IB_QP_SQ_PSN) {
1926 SET_FIELD(qp_params.modify_flags,
1927 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1928 qp_params.sq_psn = attr->sq_psn;
1929 qp->sq_psn = attr->sq_psn;
1930 }
1931
1932 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1933 if (attr->max_dest_rd_atomic >
1934 dev->attr.max_qp_resp_rd_atomic_resc) {
1935 DP_ERR(dev,
1936 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1937 attr->max_dest_rd_atomic,
1938 dev->attr.max_qp_resp_rd_atomic_resc);
1939
1940 rc = -EINVAL;
1941 goto err;
1942 }
1943
1944 SET_FIELD(qp_params.modify_flags,
1945 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1946 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1947 }
1948
1949 if (attr_mask & IB_QP_DEST_QPN) {
1950 SET_FIELD(qp_params.modify_flags,
1951 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1952
1953 qp_params.dest_qp = attr->dest_qp_num;
1954 qp->dest_qp_num = attr->dest_qp_num;
1955 }
1956
1957 if (qp->qp_type != IB_QPT_GSI)
1958 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1959 qp->qed_qp, &qp_params);
1960
1961 if (attr_mask & IB_QP_STATE) {
1962 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1963 qedr_update_qp_state(dev, qp, qp_params.new_state);
1964 qp->state = qp_params.new_state;
1965 }
1966
1967err:
1968 return rc;
1969}
1970
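/* Translate the access permissions reported by a qed QP query into IB
 * access flags; IB_ACCESS_LOCAL_WRITE is always reported.
 */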
1971static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1972{
1973 int ib_qp_acc_flags = 0;
1974
1975 if (params->incoming_rdma_write_en)
1976 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1977 if (params->incoming_rdma_read_en)
1978 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1979 if (params->incoming_atomic_en)
1980 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1981 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1982 return ib_qp_acc_flags;
1983}
1984
1985int qedr_query_qp(struct ib_qp *ibqp,
1986 struct ib_qp_attr *qp_attr,
1987 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1988{
1989 struct qed_rdma_query_qp_out_params params;
1990 struct qedr_qp *qp = get_qedr_qp(ibqp);
1991 struct qedr_dev *dev = qp->dev;
1992 int rc = 0;
1993
1994 memset(&params, 0, sizeof(params));
1995
1996 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
1997 if (rc)
1998 goto err;
1999
2000 memset(qp_attr, 0, sizeof(*qp_attr));
2001 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2002
2003 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2004 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2005 qp_attr->path_mtu = iboe_get_mtu(params.mtu);
2006 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2007 qp_attr->rq_psn = params.rq_psn;
2008 qp_attr->sq_psn = params.sq_psn;
2009 qp_attr->dest_qp_num = params.dest_qp;
2010
2011 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2012
2013 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2014 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2015 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2016 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2017 qp_attr->cap.max_inline_data = qp->max_inline_data;
2018 qp_init_attr->cap = qp_attr->cap;
2019
2020 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2021 sizeof(qp_attr->ah_attr.grh.dgid.raw));
2022
2023 qp_attr->ah_attr.grh.flow_label = params.flow_label;
2024 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2025 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2026 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2027
2028 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2029 qp_attr->ah_attr.port_num = 1;
2030 qp_attr->ah_attr.sl = 0;
2031 qp_attr->timeout = params.timeout;
2032 qp_attr->rnr_retry = params.rnr_retry;
2033 qp_attr->retry_cnt = params.retry_cnt;
2034 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2035 qp_attr->pkey_index = params.pkey_index;
2036 qp_attr->port_num = 1;
2037 qp_attr->ah_attr.src_path_bits = 0;
2038 qp_attr->ah_attr.static_rate = 0;
2039 qp_attr->alt_pkey_index = 0;
2040 qp_attr->alt_port_num = 0;
2041 qp_attr->alt_timeout = 0;
2042 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2043
2044 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2045 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2046 qp_attr->max_rd_atomic = params.max_rd_atomic;
2047 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2048
2049 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2050 qp_attr->cap.max_inline_data);
2051
2052err:
2053 return rc;
2054}
2055
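/* Tear down a QP: move it to the error state (unless it is still in
 * RESET, ERR or INIT) so outstanding work is flushed, release the qed
 * QP object (or the GSI resources), then free the SQ/RQ bookkeeping.
 */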
2056int qedr_destroy_qp(struct ib_qp *ibqp)
2057{
2058 struct qedr_qp *qp = get_qedr_qp(ibqp);
2059 struct qedr_dev *dev = qp->dev;
2060 struct ib_qp_attr attr;
2061 int attr_mask = 0;
2062 int rc = 0;
2063
2064 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2065 qp, qp->qp_type);
2066
2067 if (qp->state != QED_ROCE_QP_STATE_RESET && qp->state != QED_ROCE_QP_STATE_ERR &&
2068 qp->state != QED_ROCE_QP_STATE_INIT) {
2069 attr.qp_state = IB_QPS_ERR;
2070 attr_mask |= IB_QP_STATE;
2071
2072 /* Change the QP state to ERROR */
2073 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2074 }
2075
2076 if (qp->qp_type != IB_QPT_GSI) {
2077 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2078 if (rc)
2079 return rc;
Ram Amrani04886772016-10-10 13:15:38 +03002080 } else {
2081 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002082 }
2083
2084 if (ibqp->uobject && ibqp->uobject->context) {
2085 qedr_cleanup_user_sq(dev, qp);
2086 qedr_cleanup_user_rq(dev, qp);
2087 } else {
2088 qedr_cleanup_kernel_sq(dev, qp);
2089 qedr_cleanup_kernel_rq(dev, qp);
2090 }
2091
2092 kfree(qp);
2093
2094 return rc;
2095}
Ram Amranie0290cc2016-10-10 13:15:35 +03002096
Moni Shoua477864c2016-11-23 08:23:24 +02002097struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2098 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002099{
2100 struct qedr_ah *ah;
2101
2102 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2103 if (!ah)
2104 return ERR_PTR(-ENOMEM);
2105
2106 ah->attr = *attr;
2107
2108 return &ah->ibah;
2109}
2110
2111int qedr_destroy_ah(struct ib_ah *ibah)
2112{
2113 struct qedr_ah *ah = get_qedr_ah(ibah);
2114
2115 kfree(ah);
2116 return 0;
2117}
2118
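/* Return the active PBL table and anything still on the in-use list to
 * the free list, then release every PBL on it.
 */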
Ram Amranie0290cc2016-10-10 13:15:35 +03002119static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2120{
2121 struct qedr_pbl *pbl, *tmp;
2122
2123 if (info->pbl_table)
2124 list_add_tail(&info->pbl_table->list_entry,
2125 &info->free_pbl_list);
2126
2127 if (!list_empty(&info->inuse_pbl_list))
2128 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2129
2130 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2131 list_del(&pbl->list_entry);
2132 qedr_free_pbl(dev, &info->pbl_info, pbl);
2133 }
2134}
2135
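/* Prepare the PBL bookkeeping for an MR: size the PBL layout for
 * page_list_len entries, allocate the primary table and try to keep one
 * spare table on the free list (failure to allocate the spare is not
 * fatal).
 */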
2136static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2137 size_t page_list_len, bool two_layered)
2138{
2139 struct qedr_pbl *tmp;
2140 int rc;
2141
2142 INIT_LIST_HEAD(&info->free_pbl_list);
2143 INIT_LIST_HEAD(&info->inuse_pbl_list);
2144
2145 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2146 page_list_len, two_layered);
2147 if (rc)
2148 goto done;
2149
2150 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2151 if (!info->pbl_table) {
2152 rc = -ENOMEM;
2153 goto done;
2154 }
2155
2156 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2157 &info->pbl_table->pa);
2158
2159 /* In the usual case we use 2 PBLs, so we add one to the free
2160 * list and allocate another one
2161 */
2162 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2163 if (!tmp) {
2164 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2165 goto done;
2166 }
2167
2168 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2169
2170 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2171
2172done:
2173 if (rc)
2174 free_mr_info(dev, info);
2175
2176 return rc;
2177}
2178
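/* Register a user memory region: pin the pages with ib_umem_get(),
 * describe them in a PBL, then allocate a TID and register it with the
 * firmware. lkey/rkey are built as (itid << 8) | key.
 */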
2179struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2180 u64 usr_addr, int acc, struct ib_udata *udata)
2181{
2182 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2183 struct qedr_mr *mr;
2184 struct qedr_pd *pd;
2185 int rc = -ENOMEM;
2186
2187 pd = get_qedr_pd(ibpd);
2188 DP_DEBUG(dev, QEDR_MSG_MR,
2189 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2190 pd->pd_id, start, len, usr_addr, acc);
2191
2192 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2193 return ERR_PTR(-EINVAL);
2194
2195 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2196 if (!mr)
2197 return ERR_PTR(rc);
2198
2199 mr->type = QEDR_MR_USER;
2200
2201 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2202 if (IS_ERR(mr->umem)) {
2203 rc = -EFAULT;
2204 goto err0;
2205 }
2206
2207 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2208 if (rc)
2209 goto err1;
2210
2211 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2212 &mr->info.pbl_info);
2213
2214 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2215 if (rc) {
2216 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2217 goto err1;
2218 }
2219
2220 /* Index only, 18 bit long, lkey = itid << 8 | key */
2221 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2222 mr->hw_mr.key = 0;
2223 mr->hw_mr.pd = pd->pd_id;
2224 mr->hw_mr.local_read = 1;
2225 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2226 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2227 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2228 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2229 mr->hw_mr.mw_bind = false;
2230 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2231 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2232 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2233 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2234 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2235 mr->hw_mr.length = len;
2236 mr->hw_mr.vaddr = usr_addr;
2237 mr->hw_mr.zbva = false;
2238 mr->hw_mr.phy_mr = false;
2239 mr->hw_mr.dma_mr = false;
2240
2241 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2242 if (rc) {
2243 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2244 goto err2;
2245 }
2246
2247 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2248 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2249 mr->hw_mr.remote_atomic)
2250 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2251
2252 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2253 mr->ibmr.lkey);
2254 return &mr->ibmr;
2255
2256err2:
2257 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2258err1:
2259 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2260err0:
2261 kfree(mr);
2262 return ERR_PTR(rc);
2263}
2264
2265int qedr_dereg_mr(struct ib_mr *ib_mr)
2266{
2267 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2268 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2269 int rc = 0;
2270
2271 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2272 if (rc)
2273 return rc;
2274
2275 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2276
2277 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2278 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2279
2280 /* it could be user registered memory. */
2281 if (mr->umem)
2282 ib_umem_release(mr->umem);
2283
2284 kfree(mr);
2285
2286 return rc;
2287}
2288
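/* Allocate a fast-registration MR shell: PBL space for up to
 * max_page_list_len pages and a firmware TID of type QED_RDMA_TID_FMR.
 * The actual page addresses are supplied later via qedr_map_mr_sg().
 */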
2289struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2290{
2291 struct qedr_pd *pd = get_qedr_pd(ibpd);
2292 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2293 struct qedr_mr *mr;
2294 int rc = -ENOMEM;
2295
2296 DP_DEBUG(dev, QEDR_MSG_MR,
2297 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2298 max_page_list_len);
2299
2300 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2301 if (!mr)
2302 return ERR_PTR(rc);
2303
2304 mr->dev = dev;
2305 mr->type = QEDR_MR_FRMR;
2306
2307 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2308 if (rc)
2309 goto err0;
2310
2311 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2312 if (rc) {
2313 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2314 goto err0;
2315 }
2316
2317 /* Index only, 18 bit long, lkey = itid << 8 | key */
2318 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2319 mr->hw_mr.key = 0;
2320 mr->hw_mr.pd = pd->pd_id;
2321 mr->hw_mr.local_read = 1;
2322 mr->hw_mr.local_write = 0;
2323 mr->hw_mr.remote_read = 0;
2324 mr->hw_mr.remote_write = 0;
2325 mr->hw_mr.remote_atomic = 0;
2326 mr->hw_mr.mw_bind = false;
2327 mr->hw_mr.pbl_ptr = 0;
2328 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2329 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2330 mr->hw_mr.fbo = 0;
2331 mr->hw_mr.length = 0;
2332 mr->hw_mr.vaddr = 0;
2333 mr->hw_mr.zbva = false;
2334 mr->hw_mr.phy_mr = true;
2335 mr->hw_mr.dma_mr = false;
2336
2337 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2338 if (rc) {
2339 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2340 goto err1;
2341 }
2342
2343 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2344 mr->ibmr.rkey = mr->ibmr.lkey;
2345
2346 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2347 return mr;
2348
2349err1:
2350 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2351err0:
2352 kfree(mr);
2353 return ERR_PTR(rc);
2354}
2355
2356struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2357 enum ib_mr_type mr_type, u32 max_num_sg)
2358{
2360 struct qedr_mr *mr;
2361
2362 if (mr_type != IB_MR_TYPE_MEM_REG)
2363 return ERR_PTR(-EINVAL);
2364
2365 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2366
2367 if (IS_ERR(mr))
2368 return ERR_PTR(-EINVAL);
2369
2372 return &mr->ibmr;
2373}
2374
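/* ib_sg_to_pages() callback: store one page address in the next free
 * PBE, spilling into the following PBL table when the current one is
 * full.
 */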
2375static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2376{
2377 struct qedr_mr *mr = get_qedr_mr(ibmr);
2378 struct qedr_pbl *pbl_table;
2379 struct regpair *pbe;
2380 u32 pbes_in_page;
2381
2382 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2383 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2384 return -ENOMEM;
2385 }
2386
2387 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2388 mr->npages, addr);
2389
2390 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2391 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2392 pbe = (struct regpair *)pbl_table->va;
2393 pbe += mr->npages % pbes_in_page;
2394 pbe->lo = cpu_to_le32((u32)addr);
2395 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2396
2397 mr->npages++;
2398
2399 return 0;
2400}
2401
2402static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2403{
2404 int work = info->completed - info->completed_handled - 1;
2405
2406 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2407 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2408 struct qedr_pbl *pbl;
2409
2410 /* Free all the page lists that can be freed (all the ones
2411 * that were invalidated), under the assumption that if an FMR
2412 * completed successfully, any invalidate operation issued
2413 * before it has also completed.
2414 */
2415 pbl = list_first_entry(&info->inuse_pbl_list,
2416 struct qedr_pbl, list_entry);
2417 list_del(&pbl->list_entry);
2418 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
2419 info->completed_handled++;
2420 }
2421}
2422
2423int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2424 int sg_nents, unsigned int *sg_offset)
2425{
2426 struct qedr_mr *mr = get_qedr_mr(ibmr);
2427
2428 mr->npages = 0;
2429
2430 handle_completed_mrs(mr->dev, &mr->info);
2431 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2432}
2433
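/* A DMA MR is not backed by a PBL; only a TID with dma_mr set is
 * registered with the firmware.
 */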
2434struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2435{
2436 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2437 struct qedr_pd *pd = get_qedr_pd(ibpd);
2438 struct qedr_mr *mr;
2439 int rc;
2440
2441 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2442 if (!mr)
2443 return ERR_PTR(-ENOMEM);
2444
2445 mr->type = QEDR_MR_DMA;
2446
2447 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2448 if (rc) {
2449 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2450 goto err1;
2451 }
2452
2453 /* index only, 18 bit long, lkey = itid << 8 | key */
2454 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2455 mr->hw_mr.pd = pd->pd_id;
2456 mr->hw_mr.local_read = 1;
2457 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2458 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2459 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2460 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2461 mr->hw_mr.dma_mr = true;
2462
2463 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2464 if (rc) {
2465 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2466 goto err2;
2467 }
2468
2469 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2470 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2471 mr->hw_mr.remote_atomic)
2472 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2473
2474 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2475 return &mr->ibmr;
2476
2477err2:
2478 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2479err1:
2480 kfree(mr);
2481 return ERR_PTR(rc);
2482}
Ram Amraniafa0e132016-10-10 13:15:36 +03002483
2484static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2485{
2486 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2487}
2488
2489static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2490{
2491 int i, len = 0;
2492
2493 for (i = 0; i < num_sge; i++)
2494 len += sg_list[i].length;
2495
2496 return len;
2497}
2498
2499static void swap_wqe_data64(u64 *p)
2500{
2501 int i;
2502
2503 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2504 *p = cpu_to_be64(cpu_to_le64(*p));
2505}
2506
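/* Copy the WR payload directly into SQ WQE segments (inline send).
 * Returns the number of bytes copied, or 0 with *bad_wr set when the
 * payload exceeds ROCE_REQ_MAX_INLINE_DATA_SIZE.
 */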
2507static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2508 struct qedr_qp *qp, u8 *wqe_size,
2509 struct ib_send_wr *wr,
2510 struct ib_send_wr **bad_wr, u8 *bits,
2511 u8 bit)
2512{
2513 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2514 char *seg_prt, *wqe;
2515 int i, seg_siz;
2516
2517 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2518 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2519 *bad_wr = wr;
2520 return 0;
2521 }
2522
2523 if (!data_size)
2524 return data_size;
2525
2526 *bits |= bit;
2527
2528 seg_prt = NULL;
2529 wqe = NULL;
2530 seg_siz = 0;
2531
2532 /* Copy data inline */
2533 for (i = 0; i < wr->num_sge; i++) {
2534 u32 len = wr->sg_list[i].length;
2535 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2536
2537 while (len > 0) {
2538 u32 cur;
2539
2540 /* New segment required */
2541 if (!seg_siz) {
2542 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2543 seg_prt = wqe;
2544 seg_siz = sizeof(struct rdma_sq_common_wqe);
2545 (*wqe_size)++;
2546 }
2547
2548 /* Calculate currently allowed length */
2549 cur = min_t(u32, len, seg_siz);
2550 memcpy(seg_prt, src, cur);
2551
2552 /* Update segment variables */
2553 seg_prt += cur;
2554 seg_siz -= cur;
2555
2556 /* Update sge variables */
2557 src += cur;
2558 len -= cur;
2559
2560 /* Swap fully-completed segments */
2561 if (!seg_siz)
2562 swap_wqe_data64((u64 *)wqe);
2563 }
2564 }
2565
2566 /* swap last not completed segment */
2567 if (seg_siz)
2568 swap_wqe_data64((u64 *)wqe);
2569
2570 return data_size;
2571}
2572
2573#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2574 do { \
2575 DMA_REGPAIR_LE(sge->addr, vaddr); \
2576 (sge)->length = cpu_to_le32(vlength); \
2577 (sge)->flags = cpu_to_le32(vflags); \
2578 } while (0)
2579
2580#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2581 do { \
2582 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2583 (hdr)->num_sges = num_sge; \
2584 } while (0)
2585
2586#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2587 do { \
2588 DMA_REGPAIR_LE(sge->addr, vaddr); \
2589 (sge)->length = cpu_to_le32(vlength); \
2590 (sge)->l_key = cpu_to_le32(vlkey); \
2591 } while (0)
2592
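/* Produce one rdma_sq_sge per SGE in the WR and return the total
 * payload length in bytes.
 */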
2593static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2594 struct ib_send_wr *wr)
2595{
2596 u32 data_size = 0;
2597 int i;
2598
2599 for (i = 0; i < wr->num_sge; i++) {
2600 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2601
2602 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2603 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2604 sge->length = cpu_to_le32(wr->sg_list[i].length);
2605 data_size += wr->sg_list[i].length;
2606 }
2607
2608 if (wqe_size)
2609 *wqe_size += wr->num_sge;
2610
2611 return data_size;
2612}
2613
2614static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2615 struct qedr_qp *qp,
2616 struct rdma_sq_rdma_wqe_1st *rwqe,
2617 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2618 struct ib_send_wr *wr,
2619 struct ib_send_wr **bad_wr)
2620{
2621 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2622 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2623
2624 if (wr->send_flags & IB_SEND_INLINE) {
2625 u8 flags = 0;
2626
2627 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2628 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2629 bad_wr, &rwqe->flags, flags);
2630 }
2631
2632 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2633}
2634
2635static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2636 struct qedr_qp *qp,
2637 struct rdma_sq_send_wqe_1st *swqe,
2638 struct rdma_sq_send_wqe_2st *swqe2,
2639 struct ib_send_wr *wr,
2640 struct ib_send_wr **bad_wr)
2641{
2642 memset(swqe2, 0, sizeof(*swqe2));
2643 if (wr->send_flags & IB_SEND_INLINE) {
2644 u8 flags = 0;
2645
2646 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2647 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2648 bad_wr, &swqe->flags, flags);
2649 }
2650
2651 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2652}
2653
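/* Build the two-element fast-register WQE for an IB_WR_REG_MR request:
 * iova, key and access rights, plus the page size and the PBL address
 * of the MR being registered.
 */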
2654static int qedr_prepare_reg(struct qedr_qp *qp,
2655 struct rdma_sq_fmr_wqe_1st *fwqe1,
2656 struct ib_reg_wr *wr)
2657{
2658 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2659 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2660
2661 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2662 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2663 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2664 fwqe1->l_key = wr->key;
2665
2666 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2667 !!(wr->access & IB_ACCESS_REMOTE_READ));
2668 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2669 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2670 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2671 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2672 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2673 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2674 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2675 fwqe2->fmr_ctrl = 0;
2676
2677 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2678 ilog2(mr->ibmr.page_size) - 12);
2679
2680 fwqe2->length_hi = 0;
2681 fwqe2->length_lo = mr->ibmr.length;
2682 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2683 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2684
2685 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2686
2687 return 0;
2688}
2689
2690enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2691{
2692 switch (opcode) {
2693 case IB_WR_RDMA_WRITE:
2694 case IB_WR_RDMA_WRITE_WITH_IMM:
2695 return IB_WC_RDMA_WRITE;
2696 case IB_WR_SEND_WITH_IMM:
2697 case IB_WR_SEND:
2698 case IB_WR_SEND_WITH_INV:
2699 return IB_WC_SEND;
2700 case IB_WR_RDMA_READ:
2701 return IB_WC_RDMA_READ;
2702 case IB_WR_ATOMIC_CMP_AND_SWP:
2703 return IB_WC_COMP_SWAP;
2704 case IB_WR_ATOMIC_FETCH_AND_ADD:
2705 return IB_WC_FETCH_ADD;
2706 case IB_WR_REG_MR:
2707 return IB_WC_REG_MR;
2708 case IB_WR_LOCAL_INV:
2709 return IB_WC_LOCAL_INV;
2710 default:
2711 return IB_WC_SEND;
2712 }
2713}
2714
2715inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2716{
2717 int wq_is_full, err_wr, pbl_is_full;
2718 struct qedr_dev *dev = qp->dev;
2719
2720 /* prevent SQ overflow and/or processing of a bad WR */
2721 err_wr = wr->num_sge > qp->sq.max_sges;
2722 wq_is_full = qedr_wq_is_full(&qp->sq);
2723 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2724 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2725 if (wq_is_full || err_wr || pbl_is_full) {
2726 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2727 DP_ERR(dev,
2728 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2729 qp);
2730 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2731 }
2732
2733 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2734 DP_ERR(dev,
2735 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2736 qp);
2737 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2738 }
2739
2740 if (pbl_is_full &&
2741 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2742 DP_ERR(dev,
2743 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2744 qp);
2745 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2746 }
2747 return false;
2748 }
2749 return true;
2750}
2751
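/* Build the SQ WQE(s) for a single work request. Called with qp->q_lock
 * held by qedr_post_send(); on failure the SQ producer is rolled back
 * and *bad_wr is set.
 */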
2752int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2753 struct ib_send_wr **bad_wr)
2754{
2755 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2756 struct qedr_qp *qp = get_qedr_qp(ibqp);
2757 struct rdma_sq_atomic_wqe_1st *awqe1;
2758 struct rdma_sq_atomic_wqe_2nd *awqe2;
2759 struct rdma_sq_atomic_wqe_3rd *awqe3;
2760 struct rdma_sq_send_wqe_2st *swqe2;
2761 struct rdma_sq_local_inv_wqe *iwqe;
2762 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2763 struct rdma_sq_send_wqe_1st *swqe;
2764 struct rdma_sq_rdma_wqe_1st *rwqe;
2765 struct rdma_sq_fmr_wqe_1st *fwqe1;
2766 struct rdma_sq_common_wqe *wqe;
2767 u32 length;
2768 int rc = 0;
2769 bool comp;
2770
2771 if (!qedr_can_post_send(qp, wr)) {
2772 *bad_wr = wr;
2773 return -ENOMEM;
2774 }
2775
2776 wqe = qed_chain_produce(&qp->sq.pbl);
2777 qp->wqe_wr_id[qp->sq.prod].signaled =
2778 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2779
2780 wqe->flags = 0;
2781 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2782 !!(wr->send_flags & IB_SEND_SOLICITED));
2783 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2784 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2785 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2786 !!(wr->send_flags & IB_SEND_FENCE));
2787 wqe->prev_wqe_size = qp->prev_wqe_size;
2788
2789 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2790
2791 switch (wr->opcode) {
2792 case IB_WR_SEND_WITH_IMM:
2793 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2794 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2795 swqe->wqe_size = 2;
2796 swqe2 = qed_chain_produce(&qp->sq.pbl);
2797
2798 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2799 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2800 wr, bad_wr);
2801 swqe->length = cpu_to_le32(length);
2802 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2803 qp->prev_wqe_size = swqe->wqe_size;
2804 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2805 break;
2806 case IB_WR_SEND:
2807 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2808 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2809
2810 swqe->wqe_size = 2;
2811 swqe2 = qed_chain_produce(&qp->sq.pbl);
2812 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2813 wr, bad_wr);
2814 swqe->length = cpu_to_le32(length);
2815 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2816 qp->prev_wqe_size = swqe->wqe_size;
2817 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2818 break;
2819 case IB_WR_SEND_WITH_INV:
2820 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2821 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2822 swqe2 = qed_chain_produce(&qp->sq.pbl);
2823 swqe->wqe_size = 2;
2824 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2825 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2826 wr, bad_wr);
2827 swqe->length = cpu_to_le32(length);
2828 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2829 qp->prev_wqe_size = swqe->wqe_size;
2830 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2831 break;
2832
2833 case IB_WR_RDMA_WRITE_WITH_IMM:
2834 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2835 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2836
2837 rwqe->wqe_size = 2;
2838 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2839 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2840 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2841 wr, bad_wr);
2842 rwqe->length = cpu_to_le32(length);
2843 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2844 qp->prev_wqe_size = rwqe->wqe_size;
2845 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2846 break;
2847 case IB_WR_RDMA_WRITE:
2848 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2849 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2850
2851 rwqe->wqe_size = 2;
2852 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2853 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2854 wr, bad_wr);
2855 rwqe->length = cpu_to_le32(length);
2856 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2857 qp->prev_wqe_size = rwqe->wqe_size;
2858 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2859 break;
2860 case IB_WR_RDMA_READ_WITH_INV:
2861 DP_ERR(dev,
2862 "RDMA READ WITH INVALIDATE not supported\n");
2863 *bad_wr = wr;
2864 rc = -EINVAL;
2865 break;
2866
2867 case IB_WR_RDMA_READ:
2868 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2869 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2870
2871 rwqe->wqe_size = 2;
2872 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2873 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2874 wr, bad_wr);
2875 rwqe->length = cpu_to_le32(length);
2876 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2877 qp->prev_wqe_size = rwqe->wqe_size;
2878 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2879 break;
2880
2881 case IB_WR_ATOMIC_CMP_AND_SWP:
2882 case IB_WR_ATOMIC_FETCH_AND_ADD:
2883 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2884 awqe1->wqe_size = 4;
2885
2886 awqe2 = qed_chain_produce(&qp->sq.pbl);
2887 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2888 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2889
2890 awqe3 = qed_chain_produce(&qp->sq.pbl);
2891
2892 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2893 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2894 DMA_REGPAIR_LE(awqe3->swap_data,
2895 atomic_wr(wr)->compare_add);
2896 } else {
2897 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2898 DMA_REGPAIR_LE(awqe3->swap_data,
2899 atomic_wr(wr)->swap);
2900 DMA_REGPAIR_LE(awqe3->cmp_data,
2901 atomic_wr(wr)->compare_add);
2902 }
2903
2904 qedr_prepare_sq_sges(qp, NULL, wr);
2905
2906 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2907 qp->prev_wqe_size = awqe1->wqe_size;
2908 break;
2909
2910 case IB_WR_LOCAL_INV:
2911 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2912 iwqe->wqe_size = 1;
2913
2914 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2915 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2916 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2917 qp->prev_wqe_size = iwqe->wqe_size;
2918 break;
2919 case IB_WR_REG_MR:
2920 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2921 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2922 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2923 fwqe1->wqe_size = 2;
2924
2925 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2926 if (rc) {
2927 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2928 *bad_wr = wr;
2929 break;
2930 }
2931
2932 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2933 qp->prev_wqe_size = fwqe1->wqe_size;
2934 break;
2935 default:
2936 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2937 rc = -EINVAL;
2938 *bad_wr = wr;
2939 break;
2940 }
2941
2942 if (*bad_wr) {
2943 u16 value;
2944
2945 /* Restore prod to its position before
2946 * this WR was processed
2947 */
2948 value = le16_to_cpu(qp->sq.db_data.data.value);
2949 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2950
2951 /* Restore prev_wqe_size */
2952 qp->prev_wqe_size = wqe->prev_wqe_size;
2953 rc = -EINVAL;
2954 DP_ERR(dev, "POST SEND FAILED\n");
2955 }
2956
2957 return rc;
2958}
2959
2960int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2961 struct ib_send_wr **bad_wr)
2962{
2963 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2964 struct qedr_qp *qp = get_qedr_qp(ibqp);
2965 unsigned long flags;
2966 int rc = 0;
2967
2968 *bad_wr = NULL;
2969
Ram Amrani04886772016-10-10 13:15:38 +03002970 if (qp->qp_type == IB_QPT_GSI)
2971 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2972
Ram Amraniafa0e132016-10-10 13:15:36 +03002973 spin_lock_irqsave(&qp->q_lock, flags);
2974
2975 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
2976 (qp->state == QED_ROCE_QP_STATE_ERR)) {
2977 spin_unlock_irqrestore(&qp->q_lock, flags);
2978 *bad_wr = wr;
2979 DP_DEBUG(dev, QEDR_MSG_CQ,
2980 "QP in wrong state! QP icid=0x%x state %d\n",
2981 qp->icid, qp->state);
2982 return -EINVAL;
2983 }
2984
2985 if (!wr) {
2986 DP_ERR(dev, "Got an empty post send.\n");
2987 spin_unlock_irqrestore(&qp->q_lock, flags);
2988 return -EINVAL;
2989 }
2989
2990 while (wr) {
2991 rc = __qedr_post_send(ibqp, wr, bad_wr);
2992 if (rc)
2993 break;
2994
2995 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
2996
2997 qedr_inc_sw_prod(&qp->sq);
2998
2999 qp->sq.db_data.data.value++;
3000
3001 wr = wr->next;
3002 }
3003
3004 /* Trigger doorbell
3005 * If there was a failure in the first WR then it will be triggered in
3006 * vain. However this is not harmful (as long as the producer value is
3007 * unchanged). For performance reasons we avoid checking for this
3008 * redundant doorbell.
3009 */
3010 wmb();
3011 writel(qp->sq.db_data.raw, qp->sq.db);
3012
3013 /* Make sure write sticks */
3014 mmiowb();
3015
3016 spin_unlock_irqrestore(&qp->q_lock, flags);
3017
3018 return rc;
3019}
3020
3021int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3022 struct ib_recv_wr **bad_wr)
3023{
3024 struct qedr_qp *qp = get_qedr_qp(ibqp);
3025 struct qedr_dev *dev = qp->dev;
3026 unsigned long flags;
3027 int status = 0;
3028
Ram Amrani04886772016-10-10 13:15:38 +03003029 if (qp->qp_type == IB_QPT_GSI)
3030 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3031
Ram Amraniafa0e132016-10-10 13:15:36 +03003032 spin_lock_irqsave(&qp->q_lock, flags);
3033
3034 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
3035 (qp->state == QED_ROCE_QP_STATE_ERR)) {
3036 spin_unlock_irqrestore(&qp->q_lock, flags);
3037 *bad_wr = wr;
3038 return -EINVAL;
3039 }
3040
3041 while (wr) {
3042 int i;
3043
3044 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3045 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3046 wr->num_sge > qp->rq.max_sges) {
3047 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3048 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3049 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3050 qp->rq.max_sges);
3051 status = -ENOMEM;
3052 *bad_wr = wr;
3053 break;
3054 }
3055 for (i = 0; i < wr->num_sge; i++) {
3056 u32 flags = 0;
3057 struct rdma_rq_sge *rqe =
3058 qed_chain_produce(&qp->rq.pbl);
3059
3060 /* The first SGE must include the number
3061 * of SGEs in the list
3062 */
3063 if (!i)
3064 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3065 wr->num_sge);
3066
3067 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3068 wr->sg_list[i].lkey);
3069
3070 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3071 wr->sg_list[i].length, flags);
3072 }
3073
3074 /* Special case of no SGEs. FW requires between 1 and 4 SGEs,
3075 * so in this case we need to post one SGE with length zero. This
3076 * is because an RDMA write with immediate consumes an RQ entry.
3077 */
3078 if (!wr->num_sge) {
3079 u32 flags = 0;
3080 struct rdma_rq_sge *rqe =
3081 qed_chain_produce(&qp->rq.pbl);
3082
3083 /* The first SGE must include the number
3084 * of SGEs in the list
3085 */
3086 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3087 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3088
3089 RQ_SGE_SET(rqe, 0, 0, flags);
3090 i = 1;
3091 }
3092
3093 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3094 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3095
3096 qedr_inc_sw_prod(&qp->rq);
3097
3098 /* Flush all the writes before signalling doorbell */
3099 wmb();
3100
3101 qp->rq.db_data.data.value++;
3102
3103 writel(qp->rq.db_data.raw, qp->rq.db);
3104
3105 /* Make sure write sticks */
3106 mmiowb();
3107
3108 wr = wr->next;
3109 }
3110
3111 spin_unlock_irqrestore(&qp->q_lock, flags);
3112
3113 return status;
3114}
3115
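/* A CQE needs processing when its toggle bit matches the CQ's current
 * pbl_toggle (the toggle flips on each wrap of the CQE ring).
 */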
3116static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3117{
3118 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3119
3120 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3121 cq->pbl_toggle;
3122}
3123
3124static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3125{
3126 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3127 struct qedr_qp *qp;
3128
3129 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3130 resp_cqe->qp_handle.lo,
3131 u64);
3132 return qp;
3133}
3134
3135static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3136{
3137 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3138
3139 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3140}
3141
3142/* Return latest CQE (needs processing) */
3143static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3144{
3145 return cq->latest_cqe;
3146}
3147
3148/* For FMRs we need to increase the completed counter used by the FMR
3149 * algorithm to determine whether a PBL can be freed. This must be done
3150 * whether or not the work request was signaled, so we call this function
3151 * from the condition that checks if a WR should be skipped, to make sure
3152 * we don't miss it (this FMR operation may not have been signaled).
3153 */
3155static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3156{
3157 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3158 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3159}
3160
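/* Walk the SQ from the software consumer up to the hardware consumer
 * reported in the requester CQE, filling a work completion for every
 * signaled WQE (or for all of them when force is set) and consuming the
 * corresponding SQ PBL elements.
 */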
3161static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3162 struct qedr_cq *cq, int num_entries,
3163 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3164 int force)
3165{
3166 u16 cnt = 0;
3167
3168 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3169 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3170 qedr_chk_if_fmr(qp);
3171 /* skip WC */
3172 goto next_cqe;
3173 }
3174
3175 /* fill WC */
3176 wc->status = status;
3177 wc->wc_flags = 0;
3178 wc->src_qp = qp->id;
3179 wc->qp = &qp->ibqp;
3180
3181 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3182 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3183
3184 switch (wc->opcode) {
3185 case IB_WC_RDMA_WRITE:
3186 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3187 break;
3188 case IB_WC_COMP_SWAP:
3189 case IB_WC_FETCH_ADD:
3190 wc->byte_len = 8;
3191 break;
3192 case IB_WC_REG_MR:
3193 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3194 break;
3195 default:
3196 break;
3197 }
3198
3199 num_entries--;
3200 wc++;
3201 cnt++;
3202next_cqe:
3203 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3204 qed_chain_consume(&qp->sq.pbl);
3205 qedr_inc_sw_cons(&qp->sq);
3206 }
3207
3208 return cnt;
3209}
3210
3211static int qedr_poll_cq_req(struct qedr_dev *dev,
3212 struct qedr_qp *qp, struct qedr_cq *cq,
3213 int num_entries, struct ib_wc *wc,
3214 struct rdma_cqe_requester *req)
3215{
3216 int cnt = 0;
3217
3218 switch (req->status) {
3219 case RDMA_CQE_REQ_STS_OK:
3220 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3221 IB_WC_SUCCESS, 0);
3222 break;
3223 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3224 DP_ERR(dev,
3225 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3226 cq->icid, qp->icid);
3227 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3228 IB_WC_WR_FLUSH_ERR, 0);
3229 break;
3230 default:
3231 /* process all WQEs before the consumer */
3232 qp->state = QED_ROCE_QP_STATE_ERR;
3233 cnt = process_req(dev, qp, cq, num_entries, wc,
3234 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3235 wc += cnt;
3236 /* if we have extra WC fill it with actual error info */
3237 if (cnt < num_entries) {
3238 enum ib_wc_status wc_status;
3239
3240 switch (req->status) {
3241 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3242 DP_ERR(dev,
3243 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3244 cq->icid, qp->icid);
3245 wc_status = IB_WC_BAD_RESP_ERR;
3246 break;
3247 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3248 DP_ERR(dev,
3249 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3250 cq->icid, qp->icid);
3251 wc_status = IB_WC_LOC_LEN_ERR;
3252 break;
3253 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3254 DP_ERR(dev,
3255 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3256 cq->icid, qp->icid);
3257 wc_status = IB_WC_LOC_QP_OP_ERR;
3258 break;
3259 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3260 DP_ERR(dev,
3261 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3262 cq->icid, qp->icid);
3263 wc_status = IB_WC_LOC_PROT_ERR;
3264 break;
3265 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3266 DP_ERR(dev,
3267 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3268 cq->icid, qp->icid);
3269 wc_status = IB_WC_MW_BIND_ERR;
3270 break;
3271 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3272 DP_ERR(dev,
3273 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3274 cq->icid, qp->icid);
3275 wc_status = IB_WC_REM_INV_REQ_ERR;
3276 break;
3277 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3278 DP_ERR(dev,
3279 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3280 cq->icid, qp->icid);
3281 wc_status = IB_WC_REM_ACCESS_ERR;
3282 break;
3283 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3284 DP_ERR(dev,
3285 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3286 cq->icid, qp->icid);
3287 wc_status = IB_WC_REM_OP_ERR;
3288 break;
3289 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3290 DP_ERR(dev,
3291 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3292 cq->icid, qp->icid);
3293 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3294 break;
3295 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3296 DP_ERR(dev,
3297 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3298 cq->icid, qp->icid);
3299 wc_status = IB_WC_RETRY_EXC_ERR;
3300 break;
3301 default:
3302 DP_ERR(dev,
3303 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3304 cq->icid, qp->icid);
3305 wc_status = IB_WC_GENERAL_ERR;
3306 }
3307 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3308 wc_status, 1);
3309 }
3310 }
3311
3312 return cnt;
3313}
3314
3315static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3316 struct qedr_cq *cq, struct ib_wc *wc,
3317 struct rdma_cqe_responder *resp, u64 wr_id)
3318{
3319 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3320 u8 flags;
3321
3322 wc->opcode = IB_WC_RECV;
3323 wc->wc_flags = 0;
3324
3325 switch (resp->status) {
3326 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3327 wc_status = IB_WC_LOC_ACCESS_ERR;
3328 break;
3329 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3330 wc_status = IB_WC_LOC_LEN_ERR;
3331 break;
3332 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3333 wc_status = IB_WC_LOC_QP_OP_ERR;
3334 break;
3335 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3336 wc_status = IB_WC_LOC_PROT_ERR;
3337 break;
3338 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3339 wc_status = IB_WC_MW_BIND_ERR;
3340 break;
3341 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3342 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3343 break;
3344 case RDMA_CQE_RESP_STS_OK:
3345 wc_status = IB_WC_SUCCESS;
3346 wc->byte_len = le32_to_cpu(resp->length);
3347
3348 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3349
3350 if (flags == QEDR_RESP_RDMA_IMM)
3351 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3352
3353 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3354 wc->ex.imm_data =
3355 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3356 wc->wc_flags |= IB_WC_WITH_IMM;
3357 }
3358 break;
3359 default:
3360 wc->status = IB_WC_GENERAL_ERR;
3361 DP_ERR(dev, "Invalid CQE status detected\n");
3362 }
3363
3364 /* fill WC */
3365 wc->status = wc_status;
3366 wc->src_qp = qp->id;
3367 wc->qp = &qp->ibqp;
3368 wc->wr_id = wr_id;
3369}
3370
3371static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3372 struct qedr_cq *cq, struct ib_wc *wc,
3373 struct rdma_cqe_responder *resp)
3374{
3375 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3376
3377 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3378
3379 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3380 qed_chain_consume(&qp->rq.pbl);
3381 qedr_inc_sw_cons(&qp->rq);
3382
3383 return 1;
3384}
3385
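/* Generate IB_WC_WR_FLUSH_ERR completions for all RQ entries up to
 * hw_cons when the responder reports a flush.
 */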
3386static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3387 int num_entries, struct ib_wc *wc, u16 hw_cons)
3388{
3389 u16 cnt = 0;
3390
3391 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3392 /* fill WC */
3393 wc->status = IB_WC_WR_FLUSH_ERR;
3394 wc->wc_flags = 0;
3395 wc->src_qp = qp->id;
3396 wc->byte_len = 0;
3397 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3398 wc->qp = &qp->ibqp;
3399 num_entries--;
3400 wc++;
3401 cnt++;
3402 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3403 qed_chain_consume(&qp->rq.pbl);
3404 qedr_inc_sw_cons(&qp->rq);
3405 }
3406
3407 return cnt;
3408}
3409
3410static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3411 struct rdma_cqe_responder *resp, int *update)
3412{
3413 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3414 consume_cqe(cq);
3415 *update |= 1;
3416 }
3417}
3418
3419static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3420 struct qedr_cq *cq, int num_entries,
3421 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3422 int *update)
3423{
3424 int cnt;
3425
3426 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3427 cnt = process_resp_flush(qp, cq, num_entries, wc,
3428 resp->rq_cons);
3429 try_consume_resp_cqe(cq, qp, resp, update);
3430 } else {
3431 cnt = process_resp_one(dev, qp, cq, wc, resp);
3432 consume_cqe(cq);
3433 *update |= 1;
3434 }
3435
3436 return cnt;
3437}
3438
3439static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3440 struct rdma_cqe_requester *req, int *update)
3441{
3442 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3443 consume_cqe(cq);
3444 *update |= 1;
3445 }
3446}
3447
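/* Poll up to num_entries completions from the CQ: walk the valid CQEs,
 * dispatch requester and responder entries to their handlers, and ring
 * the CQ doorbell if anything was consumed. GSI CQs are handled
 * separately by qedr_gsi_poll_cq().
 */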
3448int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3449{
3450 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3451 struct qedr_cq *cq = get_qedr_cq(ibcq);
3452 union rdma_cqe *cqe = cq->latest_cqe;
3453 u32 old_cons, new_cons;
3454 unsigned long flags;
3455 int update = 0;
3456 int done = 0;
3457
Ram Amrani04886772016-10-10 13:15:38 +03003458 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3459 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3460
Ram Amraniafa0e132016-10-10 13:15:36 +03003461 spin_lock_irqsave(&cq->cq_lock, flags);
3462 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3463 while (num_entries && is_valid_cqe(cq, cqe)) {
3464 struct qedr_qp *qp;
3465 int cnt = 0;
3466
3467 /* prevent speculative reads of any field of CQE */
3468 rmb();
3469
3470 qp = cqe_get_qp(cqe);
3471 if (!qp) {
3472 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3473 break;
3474 }
3475
3476 wc->qp = &qp->ibqp;
3477
3478 switch (cqe_get_type(cqe)) {
3479 case RDMA_CQE_TYPE_REQUESTER:
3480 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3481 &cqe->req);
3482 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3483 break;
3484 case RDMA_CQE_TYPE_RESPONDER_RQ:
3485 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3486 &cqe->resp, &update);
3487 break;
3488 case RDMA_CQE_TYPE_INVALID:
3489 default:
3490 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3491 cqe_get_type(cqe));
3492 }
3493 num_entries -= cnt;
3494 wc += cnt;
3495 done += cnt;
3496
3497 cqe = get_cqe(cq);
3498 }
3499 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3500
3501 cq->cq_cons += new_cons - old_cons;
3502
3503 if (update)
3504 /* doorbell notifies about the latest VALID entry,
3505 * but the chain already points to the next INVALID one
3506 */
3507 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3508
3509 spin_unlock_irqrestore(&cq->cq_lock, flags);
3510 return done;
3511}
Ram Amrani993d1b52016-10-10 13:15:39 +03003512
3513int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3514 u8 port_num,
3515 const struct ib_wc *in_wc,
3516 const struct ib_grh *in_grh,
3517 const struct ib_mad_hdr *mad_hdr,
3518 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3519 size_t *out_mad_size, u16 *out_mad_pkey_index)
3520{
3521 struct qedr_dev *dev = get_qedr_dev(ibdev);
3522
3523 DP_DEBUG(dev, QEDR_MSG_GSI,
3524 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3525 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3526 mad_hdr->class_specific, mad_hdr->class_version,
3527 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3528 return IB_MAD_RESULT_SUCCESS;
3529}
3530
3531int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3532 struct ib_port_immutable *immutable)
3533{
3534 struct ib_port_attr attr;
3535 int err;
3536
3537 err = qedr_query_port(ibdev, port_num, &attr);
3538 if (err)
3539 return err;
3540
3541 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3542 immutable->gid_tbl_len = attr.gid_tbl_len;
3543 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3544 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3545 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3546
3547 return 0;
3548}