1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
46#include "qedr_hsi.h"
47#include <linux/qed/qed_if.h>
48#include "qedr.h"
49#include "verbs.h"
50#include <rdma/qedr-abi.h>
51#include "qedr_cm.h"
52
53#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
54
55int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
56{
57 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
58 return -EINVAL;
59
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
61 return 0;
62}
63
64int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
65 union ib_gid *sgid)
66{
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
68 int rc = 0;
69
70 if (!rdma_cap_roce_gid_table(ibdev, port))
71 return -ENODEV;
72
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
74 if (rc == -EAGAIN) {
75 memcpy(sgid, &zgid, sizeof(*sgid));
76 return 0;
77 }
78
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
81
82 return rc;
83}
84
85int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
88{
89 if (!rdma_cap_roce_gid_table(device, port_num))
90 return -EINVAL;
91
92 if (port_num > QEDR_MAX_PORT)
93 return -EINVAL;
94
95 if (!context)
96 return -EINVAL;
97
98 return 0;
99}
100
101int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
103{
104 if (!rdma_cap_roce_gid_table(device, port_num))
105 return -EINVAL;
106
107 if (port_num > QEDR_MAX_PORT)
108 return -EINVAL;
109
110 if (!context)
111 return -EINVAL;
112
113 return 0;
114}
115
116int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
118{
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
121
122 if (!dev->rdma_ctx) {
123 DP_ERR(dev,
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
125 dev->rdma_ctx);
126 return -EINVAL;
127 }
128
129 memset(attr, 0, sizeof(*attr));
130
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
143
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
159
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
163
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
168
169 return 0;
170}
171
172#define QEDR_SPEED_SDR (1)
173#define QEDR_SPEED_DDR (2)
174#define QEDR_SPEED_QDR (4)
175#define QEDR_SPEED_FDR10 (8)
176#define QEDR_SPEED_FDR (16)
177#define QEDR_SPEED_EDR (32)
178
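/* Map an Ethernet link speed in Mb/s to the closest IB speed/width pair;
 * unsupported speeds fall back to SDR x1.
 */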
179static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
180 u8 *ib_width)
181{
182 switch (speed) {
183 case 1000:
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
186 break;
187 case 10000:
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
190 break;
191
192 case 20000:
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
195 break;
196
197 case 25000:
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
200 break;
201
202 case 40000:
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
205 break;
206
207 case 50000:
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
210 break;
211
212 case 100000:
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
215 break;
216
217 default:
218 /* Unsupported */
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
221 }
222}
223
224int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
225{
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
228
229 dev = get_qedr_dev(ibdev);
230 if (port > 1) {
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
232 return -EINVAL;
233 }
234
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
237 return -EINVAL;
238 }
239
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241 memset(attr, 0, sizeof(*attr));
242
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
246 } else {
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
249 }
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
252 attr->lid = 0;
253 attr->lmc = 0;
254 attr->sm_lid = 0;
255 attr->sm_sl = 0;
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
265
266 return 0;
267}
268
269int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
271{
272 struct qedr_dev *dev;
273
274 dev = get_qedr_dev(ibdev);
275 if (port > 1) {
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
277 return -EINVAL;
278 }
279
280 return 0;
281}
282
283static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
284 unsigned long len)
285{
286 struct qedr_mm *mm;
287
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
289 if (!mm)
290 return -ENOMEM;
291
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to a multiple of PAGE_SIZE.
299 */
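/* e.g. with 4 KiB pages, a 100-byte request is stored with key.len = 4096 */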
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
302
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
306
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
311
312 return 0;
313}
314
315static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
316 unsigned long len)
317{
318 bool found = false;
319 struct qedr_mm *mm;
320
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
324 continue;
325
326 found = true;
327 break;
328 }
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
333
334 return found;
335}
336
337struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
339{
340 int rc;
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
345
346 if (!udata)
347 return ERR_PTR(-EFAULT);
348
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
350 if (!ctx)
351 return ERR_PTR(-ENOMEM);
352
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
354 if (rc) {
355 DP_ERR(dev,
356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
357 rc);
358 goto err;
359 }
360
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
367
368 memset(&uresp, 0, sizeof(uresp));
369
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
379
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
381 if (rc)
382 goto err;
383
384 ctx->dev = dev;
385
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387 if (rc)
388 goto err;
389
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
391 &ctx->ibucontext);
392 return &ctx->ibucontext;
393
394err:
395 kfree(ctx);
396 return ERR_PTR(rc);
397}
398
399int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
400{
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
403 int status = 0;
404
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
406 uctx);
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
408
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
414 kfree(mm);
415 }
416
417 kfree(uctx);
418 return status;
419}
420
421int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
422{
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
428 int rc = 0;
429 bool found;
430
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
436 vma->vm_start);
437 return -EINVAL;
438 }
439
440 found = qedr_search_mmap(ucontext, vm_page, len);
441 if (!found) {
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
443 vma->vm_pgoff);
444 return -EINVAL;
445 }
446
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
448
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
450 dev->db_size))) {
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
454 return -EPERM;
455 }
456
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
458
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
461 } else {
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
465 }
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
467 return rc;
468}
469
470struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd;
477 u16 pd_id;
478 int rc;
479
480 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
481 (udata && context) ? "User Lib" : "Kernel");
482
483 if (!dev->rdma_ctx) {
484 DP_ERR(dev, "invlaid RDMA context\n");
485 return ERR_PTR(-EINVAL);
486 }
487
488 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
489 if (!pd)
490 return ERR_PTR(-ENOMEM);
491
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id;
496
497 if (udata && context) {
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc)
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context);
502 uctx->pd = pd;
503 pd->uctx = uctx;
504 }
505
506 return &pd->ibpd;
507}
508
509int qedr_dealloc_pd(struct ib_pd *ibpd)
510{
511 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
512 struct qedr_pd *pd = get_qedr_pd(ibpd);
513
514 if (!pd) {
515 pr_err("Invalid PD received in dealloc_pd\n");
516 return -EINVAL;
517 }
518
519 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
520 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
521
522 kfree(pd);
523
524 return 0;
525}
526
527static void qedr_free_pbl(struct qedr_dev *dev,
528 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
529{
530 struct pci_dev *pdev = dev->pdev;
531 int i;
532
533 for (i = 0; i < pbl_info->num_pbls; i++) {
534 if (!pbl[i].va)
535 continue;
536 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
537 pbl[i].va, pbl[i].pa);
538 }
539
540 kfree(pbl);
541}
542
543#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
544#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
545
546#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
547#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
548#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
549
550static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
551 struct qedr_pbl_info *pbl_info,
552 gfp_t flags)
553{
554 struct pci_dev *pdev = dev->pdev;
555 struct qedr_pbl *pbl_table;
556 dma_addr_t *pbl_main_tbl;
557 dma_addr_t pa;
558 void *va;
559 int i;
560
561 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
562 if (!pbl_table)
563 return ERR_PTR(-ENOMEM);
564
565 for (i = 0; i < pbl_info->num_pbls; i++) {
566 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
567 &pa, flags);
568 if (!va)
569 goto err;
570
571 memset(va, 0, pbl_info->pbl_size);
572 pbl_table[i].va = va;
573 pbl_table[i].pa = pa;
574 }
575
576 /* Two-layer PBLs: if we have more than one PBL, initialize the first
577 * one with physical pointers to all of the rest.
578 */
579 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
580 for (i = 0; i < pbl_info->num_pbls - 1; i++)
581 pbl_main_tbl[i] = pbl_table[i + 1].pa;
582
583 return pbl_table;
584
585err:
586 for (i--; i >= 0; i--)
587 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
588 pbl_table[i].va, pbl_table[i].pa);
589
590 qedr_free_pbl(dev, pbl_info, pbl_table);
591
592 return ERR_PTR(-ENOMEM);
593}
594
595static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
596 struct qedr_pbl_info *pbl_info,
597 u32 num_pbes, int two_layer_capable)
598{
599 u32 pbl_capacity;
600 u32 pbl_size;
601 u32 num_pbls;
602
603 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
604 if (num_pbes > MAX_PBES_TWO_LAYER) {
605 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
606 num_pbes);
607 return -EINVAL;
608 }
609
610 /* calculate required pbl page size */
611 pbl_size = MIN_FW_PBL_PAGE_SIZE;
612 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
613 NUM_PBES_ON_PAGE(pbl_size);
614
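/* Double the PBL page size until two layers of indirection can cover
 * all PBEs: capacity = (PBEs per page) squared.
 */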
615 while (pbl_capacity < num_pbes) {
616 pbl_size *= 2;
617 pbl_capacity = pbl_size / sizeof(u64);
618 pbl_capacity = pbl_capacity * pbl_capacity;
619 }
620
621 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
622 num_pbls++; /* One more for layer 0 (points to the PBLs) */
623 pbl_info->two_layered = true;
624 } else {
625 /* One layered PBL */
626 num_pbls = 1;
627 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
628 roundup_pow_of_two((num_pbes * sizeof(u64))));
629 pbl_info->two_layered = false;
630 }
631
632 pbl_info->num_pbls = num_pbls;
633 pbl_info->pbl_size = pbl_size;
634 pbl_info->num_pbes = num_pbes;
635
636 DP_DEBUG(dev, QEDR_MSG_MR,
637 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
638 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
639
640 return 0;
641}
642
643static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
644 struct qedr_pbl *pbl,
645 struct qedr_pbl_info *pbl_info)
646{
647 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
648 struct qedr_pbl *pbl_tbl;
649 struct scatterlist *sg;
650 struct regpair *pbe;
651 int entry;
652 u32 addr;
653
654 if (!pbl_info->num_pbes)
655 return;
656
657 /* If we have a two-layered PBL, the first PBL points to the rest of
658 * the PBLs and the first data entry lies in the second PBL of the table.
659 */
660 if (pbl_info->two_layered)
661 pbl_tbl = &pbl[1];
662 else
663 pbl_tbl = pbl;
664
665 pbe = (struct regpair *)pbl_tbl->va;
666 if (!pbe) {
667 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
668 return;
669 }
670
671 pbe_cnt = 0;
672
673 shift = ilog2(umem->page_size);
674
675 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
676 pages = sg_dma_len(sg) >> shift;
677 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
678 /* store the page address in pbe */
679 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
680 umem->page_size * pg_cnt);
681 addr = upper_32_bits(sg_dma_address(sg) +
682 umem->page_size * pg_cnt);
683 pbe->hi = cpu_to_le32(addr);
684 pbe_cnt++;
685 total_num_pbes++;
686 pbe++;
687
688 if (total_num_pbes == pbl_info->num_pbes)
689 return;
690
691 /* If the current PBL has no more room for PBEs,
692 * move to the next PBL.
693 */
694 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
695 pbl_tbl++;
696 pbe = (struct regpair *)pbl_tbl->va;
697 pbe_cnt = 0;
698 }
699 }
700 }
701}
702
703static int qedr_copy_cq_uresp(struct qedr_dev *dev,
704 struct qedr_cq *cq, struct ib_udata *udata)
705{
706 struct qedr_create_cq_uresp uresp;
707 int rc;
708
709 memset(&uresp, 0, sizeof(uresp));
710
711 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
712 uresp.icid = cq->icid;
713
714 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
715 if (rc)
716 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
717
718 return rc;
719}
720
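/* Advance to the next CQE in the chain; when the last element is consumed,
 * flip the toggle value the driver expects in valid CQEs for the next pass
 * around the ring.
 */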
721static void consume_cqe(struct qedr_cq *cq)
722{
723 if (cq->latest_cqe == cq->toggle_cqe)
724 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
725
726 cq->latest_cqe = qed_chain_consume(&cq->pbl);
727}
728
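/* Return how many CQEs fit once the requested count (plus one extra entry
 * kept by the driver) is rounded up to whole pages.
 */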
729static inline int qedr_align_cq_entries(int entries)
730{
731 u64 size, aligned_size;
732
733 /* We allocate an extra entry that we don't report to the FW. */
734 size = (entries + 1) * QEDR_CQE_SIZE;
735 aligned_size = ALIGN(size, PAGE_SIZE);
736
737 return aligned_size / QEDR_CQE_SIZE;
738}
739
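/* Pin the user-space queue buffer with ib_umem_get() and build the PBL
 * that describes it to the FW.
 */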
740static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
741 struct qedr_dev *dev,
742 struct qedr_userq *q,
743 u64 buf_addr, size_t buf_len,
744 int access, int dmasync)
745{
746 int page_cnt;
747 int rc;
748
749 q->buf_addr = buf_addr;
750 q->buf_len = buf_len;
751 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
752 if (IS_ERR(q->umem)) {
753 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
754 PTR_ERR(q->umem));
755 return PTR_ERR(q->umem);
756 }
757
758 page_cnt = ib_umem_page_count(q->umem);
759 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
760 if (rc)
761 goto err0;
762
763 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
764 if (IS_ERR_OR_NULL(q->pbl_tbl))
765 goto err0;
766
767 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
768
769 return 0;
770
771err0:
772 ib_umem_release(q->umem);
773
774 return rc;
775}
776
777static inline void qedr_init_cq_params(struct qedr_cq *cq,
778 struct qedr_ucontext *ctx,
779 struct qedr_dev *dev, int vector,
780 int chain_entries, int page_cnt,
781 u64 pbl_ptr,
782 struct qed_rdma_create_cq_in_params
783 *params)
784{
785 memset(params, 0, sizeof(*params));
786 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
787 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
788 params->cnq_id = vector;
789 params->cq_size = chain_entries - 1;
790 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
791 params->pbl_num_pages = page_cnt;
792 params->pbl_ptr = pbl_ptr;
793 params->pbl_two_level = 0;
794}
795
796static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
797{
798 /* Flush data before signalling doorbell */
799 wmb();
800 cq->db.data.agg_flags = flags;
801 cq->db.data.value = cpu_to_le32(cons);
802 writeq(cq->db.raw, cq->db_addr);
803
804 /* Make sure write would stick */
805 mmiowb();
806}
807
808int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
809{
810 struct qedr_cq *cq = get_qedr_cq(ibcq);
811 unsigned long sflags;
812
813 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
814 return 0;
815
816 spin_lock_irqsave(&cq->cq_lock, sflags);
817
818 cq->arm_flags = 0;
819
820 if (flags & IB_CQ_SOLICITED)
821 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
822
823 if (flags & IB_CQ_NEXT_COMP)
824 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
825
826 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
827
828 spin_unlock_irqrestore(&cq->cq_lock, sflags);
829
830 return 0;
831}
832
833struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
834 const struct ib_cq_init_attr *attr,
835 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
836{
837 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
838 struct qed_rdma_destroy_cq_out_params destroy_oparams;
839 struct qed_rdma_destroy_cq_in_params destroy_iparams;
840 struct qedr_dev *dev = get_qedr_dev(ibdev);
841 struct qed_rdma_create_cq_in_params params;
842 struct qedr_create_cq_ureq ureq;
843 int vector = attr->comp_vector;
844 int entries = attr->cqe;
845 struct qedr_cq *cq;
846 int chain_entries;
847 int page_cnt;
848 u64 pbl_ptr;
849 u16 icid;
850 int rc;
851
852 DP_DEBUG(dev, QEDR_MSG_INIT,
853 "create_cq: called from %s. entries=%d, vector=%d\n",
854 udata ? "User Lib" : "Kernel", entries, vector);
855
856 if (entries > QEDR_MAX_CQES) {
857 DP_ERR(dev,
858 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
859 entries, QEDR_MAX_CQES);
860 return ERR_PTR(-EINVAL);
861 }
862
863 chain_entries = qedr_align_cq_entries(entries);
864 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
865
866 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
867 if (!cq)
868 return ERR_PTR(-ENOMEM);
869
870 if (udata) {
871 memset(&ureq, 0, sizeof(ureq));
872 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
873 DP_ERR(dev,
874 "create cq: problem copying data from user space\n");
875 goto err0;
876 }
877
878 if (!ureq.len) {
879 DP_ERR(dev,
880 "create cq: cannot create a cq with 0 entries\n");
881 goto err0;
882 }
883
884 cq->cq_type = QEDR_CQ_TYPE_USER;
885
886 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
887 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
888 if (rc)
889 goto err0;
890
891 pbl_ptr = cq->q.pbl_tbl->pa;
892 page_cnt = cq->q.pbl_info.num_pbes;
893
894 cq->ibcq.cqe = chain_entries;
895 } else {
896 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
897
898 rc = dev->ops->common->chain_alloc(dev->cdev,
899 QED_CHAIN_USE_TO_CONSUME,
900 QED_CHAIN_MODE_PBL,
901 QED_CHAIN_CNT_TYPE_U32,
902 chain_entries,
903 sizeof(union rdma_cqe),
904 &cq->pbl);
905 if (rc)
906 goto err1;
907
908 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
909 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
910 cq->ibcq.cqe = cq->pbl.capacity;
911 }
912
913 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
914 pbl_ptr, &params);
915
916 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
917 if (rc)
918 goto err2;
919
920 cq->icid = icid;
921 cq->sig = QEDR_CQ_MAGIC_NUMBER;
922 spin_lock_init(&cq->cq_lock);
923
924 if (ib_ctx) {
925 rc = qedr_copy_cq_uresp(dev, cq, udata);
926 if (rc)
927 goto err3;
928 } else {
929 /* Generate doorbell address. */
930 cq->db_addr = dev->db_addr +
931 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
932 cq->db.data.icid = cq->icid;
933 cq->db.data.params = DB_AGG_CMD_SET <<
934 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
935
936 /* Point to the very last element; once we pass it we will toggle. */
937 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
938 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
939 cq->latest_cqe = NULL;
940 consume_cqe(cq);
941 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
942 }
943
944 DP_DEBUG(dev, QEDR_MSG_CQ,
945 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
946 cq->icid, cq, params.cq_size);
947
948 return &cq->ibcq;
949
950err3:
951 destroy_iparams.icid = cq->icid;
952 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
953 &destroy_oparams);
954err2:
955 if (udata)
956 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
957 else
958 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
959err1:
960 if (udata)
961 ib_umem_release(cq->q.umem);
962err0:
963 kfree(cq);
964 return ERR_PTR(-EINVAL);
965}
966
967int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
968{
969 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
970 struct qedr_cq *cq = get_qedr_cq(ibcq);
971
972 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
973
974 return 0;
975}
976
977int qedr_destroy_cq(struct ib_cq *ibcq)
978{
979 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
980 struct qed_rdma_destroy_cq_out_params oparams;
981 struct qed_rdma_destroy_cq_in_params iparams;
982 struct qedr_cq *cq = get_qedr_cq(ibcq);
983
984 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
985
986 /* GSI CQs are handled by the driver, so they don't exist in the FW */
987 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
988 int rc;
989
990 iparams.icid = cq->icid;
991 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
992 &oparams);
993 if (rc)
994 return rc;
995 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
996 }
997
998 if (ibcq->uobject && ibcq->uobject->context) {
999 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1000 ib_umem_release(cq->q.umem);
1001 }
1002
1003 kfree(cq);
1004
1005 return 0;
1006}
1007
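/* Resolve the source GID entry referenced by the modify-QP request and fill
 * the RoCE v1 / RoCE v2 (IPv4/IPv6) addressing fields and VLAN in qp_params.
 */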
1008static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1009 struct ib_qp_attr *attr,
1010 int attr_mask,
1011 struct qed_rdma_modify_qp_in_params
1012 *qp_params)
1013{
1014 enum rdma_network_type nw_type;
1015 struct ib_gid_attr gid_attr;
1016 union ib_gid gid;
1017 u32 ipv4_addr;
1018 int rc = 0;
1019 int i;
1020
1021 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1022 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1023 if (rc)
1024 return rc;
1025
1026 if (!memcmp(&gid, &zgid, sizeof(gid)))
1027 return -ENOENT;
1028
1029 if (gid_attr.ndev) {
1030 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1031
1032 dev_put(gid_attr.ndev);
1033 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1034 switch (nw_type) {
1035 case RDMA_NETWORK_IPV6:
1036 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1037 sizeof(qp_params->sgid));
1038 memcpy(&qp_params->dgid.bytes[0],
1039 &attr->ah_attr.grh.dgid,
1040 sizeof(qp_params->dgid));
1041 qp_params->roce_mode = ROCE_V2_IPV6;
1042 SET_FIELD(qp_params->modify_flags,
1043 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1044 break;
1045 case RDMA_NETWORK_IB:
1046 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1047 sizeof(qp_params->sgid));
1048 memcpy(&qp_params->dgid.bytes[0],
1049 &attr->ah_attr.grh.dgid,
1050 sizeof(qp_params->dgid));
1051 qp_params->roce_mode = ROCE_V1;
1052 break;
1053 case RDMA_NETWORK_IPV4:
1054 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1055 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1056 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1057 qp_params->sgid.ipv4_addr = ipv4_addr;
1058 ipv4_addr =
1059 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1060 qp_params->dgid.ipv4_addr = ipv4_addr;
1061 SET_FIELD(qp_params->modify_flags,
1062 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1063 qp_params->roce_mode = ROCE_V2_IPV4;
1064 break;
1065 }
1066 }
1067
1068 for (i = 0; i < 4; i++) {
1069 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1070 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1071 }
1072
1073 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1074 qp_params->vlan_id = 0;
1075
1076 return 0;
1077}
1078
1079static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1080{
1081 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1082 ib_umem_release(qp->usq.umem);
1083}
1084
1085static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1086{
1087 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1088 ib_umem_release(qp->urq.umem);
1089}
1090
1091static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1092{
1093 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1094 kfree(qp->wqe_wr_id);
1095}
1096
1097static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1098{
1099 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1100 kfree(qp->rqe_wr_id);
1101}
1102
1103static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1104 struct ib_qp_init_attr *attrs)
1105{
1106 struct qedr_device_attr *qattr = &dev->attr;
1107
1108 /* QP0... attrs->qp_type == IB_QPT_GSI */
1109 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1110 DP_DEBUG(dev, QEDR_MSG_QP,
1111 "create qp: unsupported qp type=0x%x requested\n",
1112 attrs->qp_type);
1113 return -EINVAL;
1114 }
1115
1116 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1117 DP_ERR(dev,
1118 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1119 attrs->cap.max_send_wr, qattr->max_sqe);
1120 return -EINVAL;
1121 }
1122
1123 if (attrs->cap.max_inline_data > qattr->max_inline) {
1124 DP_ERR(dev,
1125 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1126 attrs->cap.max_inline_data, qattr->max_inline);
1127 return -EINVAL;
1128 }
1129
1130 if (attrs->cap.max_send_sge > qattr->max_sge) {
1131 DP_ERR(dev,
1132 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1133 attrs->cap.max_send_sge, qattr->max_sge);
1134 return -EINVAL;
1135 }
1136
1137 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1138 DP_ERR(dev,
1139 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1140 attrs->cap.max_recv_sge, qattr->max_sge);
1141 return -EINVAL;
1142 }
1143
1144 /* Unprivileged user space cannot create special QP */
1145 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1146 DP_ERR(dev,
1147 "create qp: userspace can't create special QPs of type=0x%x\n",
1148 attrs->qp_type);
1149 return -EINVAL;
1150 }
1151
1152 return 0;
1153}
1154
1155static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1156 struct qedr_qp *qp)
1157{
1158 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1159 uresp->rq_icid = qp->icid;
1160}
1161
1162static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1163 struct qedr_qp *qp)
1164{
1165 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1166 uresp->sq_icid = qp->icid + 1;
1167}
1168
1169static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1170 struct qedr_qp *qp, struct ib_udata *udata)
1171{
1172 struct qedr_create_qp_uresp uresp;
1173 int rc;
1174
1175 memset(&uresp, 0, sizeof(uresp));
1176 qedr_copy_sq_uresp(&uresp, qp);
1177 qedr_copy_rq_uresp(&uresp, qp);
1178
1179 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1180 uresp.qp_id = qp->qp_id;
1181
1182 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1183 if (rc)
1184 DP_ERR(dev,
1185 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1186 qp->icid);
1187
1188 return rc;
1189}
1190
1191static void qedr_set_qp_init_params(struct qedr_dev *dev,
1192 struct qedr_qp *qp,
1193 struct qedr_pd *pd,
1194 struct ib_qp_init_attr *attrs)
1195{
1196 qp->pd = pd;
1197
1198 spin_lock_init(&qp->q_lock);
1199
1200 qp->qp_type = attrs->qp_type;
1201 qp->max_inline_data = attrs->cap.max_inline_data;
1202 qp->sq.max_sges = attrs->cap.max_send_sge;
1203 qp->state = QED_ROCE_QP_STATE_RESET;
1204 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1205 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1206 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1207 qp->dev = dev;
1208
1209 DP_DEBUG(dev, QEDR_MSG_QP,
1210 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1211 pd->pd_id, qp->qp_type, qp->max_inline_data,
1212 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1213 DP_DEBUG(dev, QEDR_MSG_QP,
1214 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1215 qp->sq.max_sges, qp->sq_cq->icid);
1216 qp->rq.max_sges = attrs->cap.max_recv_sge;
1217 DP_DEBUG(dev, QEDR_MSG_QP,
1218 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1219 qp->rq.max_sges, qp->rq_cq->icid);
1220}
1221
1222static inline void
1223qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1224 struct qedr_create_qp_ureq *ureq)
1225{
1226 /* QP handle to be written in CQE */
1227 params->qp_handle_lo = ureq->qp_handle_lo;
1228 params->qp_handle_hi = ureq->qp_handle_hi;
1229}
1230
1231static inline void
1232qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1233{
1234 qp->sq.db = dev->db_addr +
1235 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1236 qp->sq.db_data.data.icid = qp->icid + 1;
1237}
1238
1239static inline void
1240qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1241{
1242 qp->rq.db = dev->db_addr +
1243 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1244 qp->rq.db_data.data.icid = qp->icid;
1245}
1246
1247static inline int
1248qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1249 struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1250{
1251 /* Allocate driver internal RQ array */
1252 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1253 GFP_KERNEL);
1254 if (!qp->rqe_wr_id)
1255 return -ENOMEM;
1256
1257 DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1258
1259 return 0;
1260}
1261
1262static inline int
1263qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1264 struct qedr_qp *qp,
1265 struct ib_qp_init_attr *attrs,
1266 struct qed_rdma_create_qp_in_params *params)
1267{
1268 u32 temp_max_wr;
1269
1270 /* Allocate driver internal SQ array */
1271 temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1272 temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1273
1274 /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
1275 qp->sq.max_wr = (u16)temp_max_wr;
1276 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1277 GFP_KERNEL);
1278 if (!qp->wqe_wr_id)
1279 return -ENOMEM;
1280
1281 DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1282
1283 /* QP handle to be written in CQE */
1284 params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1285 params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1286
1287 return 0;
1288}
1289
1290static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1291 struct qedr_qp *qp,
1292 struct ib_qp_init_attr *attrs)
1293{
1294 u32 n_sq_elems, n_sq_entries;
1295 int rc;
1296
1297 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1298 * the ring. The ring should allow at least a single WR, even if the
1299 * user requested none, due to allocation issues.
1300 */
1301 n_sq_entries = attrs->cap.max_send_wr;
1302 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1303 n_sq_entries = max_t(u32, n_sq_entries, 1);
1304 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1305 rc = dev->ops->common->chain_alloc(dev->cdev,
1306 QED_CHAIN_USE_TO_PRODUCE,
1307 QED_CHAIN_MODE_PBL,
1308 QED_CHAIN_CNT_TYPE_U32,
1309 n_sq_elems,
1310 QEDR_SQE_ELEMENT_SIZE,
1311 &qp->sq.pbl);
1312 if (rc) {
1313 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1314 return rc;
1315 }
1316
1317 DP_DEBUG(dev, QEDR_MSG_SQ,
1318 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1319 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1320 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1321 return 0;
1322}
1323
1324static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1325 struct qedr_qp *qp,
1326 struct ib_qp_init_attr *attrs)
1327{
1328 u32 n_rq_elems, n_rq_entries;
1329 int rc;
1330
1331 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1332 * the ring. The ring should allow at least a single WR, even if the
1333 * user requested none, due to allocation issues.
1334 */
1335 n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1336 n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1337 rc = dev->ops->common->chain_alloc(dev->cdev,
1338 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1339 QED_CHAIN_MODE_PBL,
1340 QED_CHAIN_CNT_TYPE_U32,
1341 n_rq_elems,
1342 QEDR_RQE_ELEMENT_SIZE,
1343 &qp->rq.pbl);
1344
1345 if (rc) {
1346 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1347 return -ENOMEM;
1348 }
1349
1350 DP_DEBUG(dev, QEDR_MSG_RQ,
1351 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1352 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1353 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1354
1355 /* n_rq_entries < u16 so the casting is safe */
1356 qp->rq.max_wr = (u16)n_rq_entries;
1357
1358 return 0;
1359}
1360
1361static inline void
1362qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1363 struct qedr_pd *pd,
1364 struct qedr_qp *qp,
1365 struct ib_qp_init_attr *attrs,
1366 struct ib_udata *udata,
1367 struct qed_rdma_create_qp_in_params *params)
1368{
1369 /* QP handle to be written in an async event */
1370 params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1371 params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1372
1373 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1374 params->fmr_and_reserved_lkey = !udata;
1375 params->pd = pd->pd_id;
1376 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1377 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1378 params->max_sq_sges = 0;
1379 params->stats_queue = 0;
1380
1381 if (udata) {
1382 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1383 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1384 } else {
1385 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1386 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1387 }
1388}
1389
1390static inline void
1391qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1392 struct ib_qp_init_attr *attrs,
1393 struct ib_udata *udata,
1394 struct qed_rdma_create_qp_in_params *params)
1395{
1396 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1397 params->srq_id = 0;
1398 params->use_srq = false;
1399
1400 if (udata) {
1401 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1402 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1403 } else {
1404 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1405 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1406 }
1407}
1408
1409static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1410{
1411 DP_DEBUG(dev, QEDR_MSG_QP,
1412 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1413 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1414 qp->urq.buf_len);
1415}
1416
1417static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1418 struct qedr_dev *dev,
1419 struct qedr_qp *qp,
1420 struct qedr_create_qp_ureq *ureq)
1421{
1422 int rc;
1423
1424 /* SQ - read access only (0), dma sync not required (0) */
1425 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1426 ureq->sq_len, 0, 0);
1427 if (rc)
1428 return rc;
1429
1430 /* RQ - read access only (0), dma sync not required (0) */
1431 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1432 ureq->rq_len, 0, 0);
1433
1434 if (rc)
1435 qedr_cleanup_user_sq(dev, qp);
1436 return rc;
1437}
1438
1439static inline int
1440qedr_init_kernel_qp(struct qedr_dev *dev,
1441 struct qedr_qp *qp,
1442 struct ib_qp_init_attr *attrs,
1443 struct qed_rdma_create_qp_in_params *params)
1444{
1445 int rc;
1446
1447 rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1448 if (rc) {
1449 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1450 return rc;
1451 }
1452
1453 rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1454 if (rc) {
1455 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1456 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1457 return rc;
1458 }
1459
1460 rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1461 if (rc) {
1462 qedr_cleanup_kernel_sq(dev, qp);
1463 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1464 return rc;
1465 }
1466
1467 rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1468 if (rc) {
1469 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1470 qedr_cleanup_kernel_sq(dev, qp);
1471 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1472 return rc;
1473 }
1474
1475 return rc;
1476}
1477
1478struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1479 struct ib_qp_init_attr *attrs,
1480 struct ib_udata *udata)
1481{
1482 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1483 struct qed_rdma_create_qp_out_params out_params;
1484 struct qed_rdma_create_qp_in_params in_params;
1485 struct qedr_pd *pd = get_qedr_pd(ibpd);
1486 struct ib_ucontext *ib_ctx = NULL;
1487 struct qedr_ucontext *ctx = NULL;
1488 struct qedr_create_qp_ureq ureq;
1489 struct qedr_qp *qp;
1490 struct ib_qp *ibqp;
1491 int rc = 0;
1492
1493 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1494 udata ? "user library" : "kernel", pd);
1495
1496 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1497 if (rc)
1498 return ERR_PTR(rc);
1499
1500 if (attrs->srq)
1501 return ERR_PTR(-EINVAL);
1502
1503 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1504 if (!qp)
1505 return ERR_PTR(-ENOMEM);
1506
1507 DP_DEBUG(dev, QEDR_MSG_QP,
1508 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1509 get_qedr_cq(attrs->send_cq),
1510 get_qedr_cq(attrs->send_cq)->icid,
1511 get_qedr_cq(attrs->recv_cq),
1512 get_qedr_cq(attrs->recv_cq)->icid);
1513
1514 qedr_set_qp_init_params(dev, qp, pd, attrs);
1515
1516 if (attrs->qp_type == IB_QPT_GSI) {
1517 if (udata) {
1518 DP_ERR(dev,
1519 "create qp: unexpected udata when creating GSI QP\n");
1520 goto err0;
1521 }
1522 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1523 if (IS_ERR(ibqp))
1524 kfree(qp);
1525 return ibqp;
1526 }
1527
1528 memset(&in_params, 0, sizeof(in_params));
1529
1530 if (udata) {
1531 if (!(udata && ibpd->uobject && ibpd->uobject->context))
1532 goto err0;
1533
1534 ib_ctx = ibpd->uobject->context;
1535 ctx = get_qedr_ucontext(ib_ctx);
1536
1537 memset(&ureq, 0, sizeof(ureq));
1538 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1539 DP_ERR(dev,
1540 "create qp: problem copying data from user space\n");
1541 goto err0;
1542 }
1543
1544 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1545 if (rc)
1546 goto err0;
1547
1548 qedr_init_qp_user_params(&in_params, &ureq);
1549 } else {
1550 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1551 if (rc)
1552 goto err0;
1553 }
1554
1555 qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1556 qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1557
1558 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1559 &in_params, &out_params);
1560
1561 if (!qp->qed_qp)
1562 goto err1;
1563
1564 qp->qp_id = out_params.qp_id;
1565 qp->icid = out_params.icid;
1566 qp->ibqp.qp_num = qp->qp_id;
1567
1568 if (udata) {
1569 rc = qedr_copy_qp_uresp(dev, qp, udata);
1570 if (rc)
1571 goto err2;
1572
1573 qedr_qp_user_print(dev, qp);
1574 } else {
1575 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1576 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1577 }
1578
1579 DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1580 udata ? "user" : "kernel", qp);
1581
1582 return &qp->ibqp;
1583
1584err2:
1585 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1586 if (rc)
1587 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1588err1:
1589 if (udata) {
1590 qedr_cleanup_user_sq(dev, qp);
1591 qedr_cleanup_user_rq(dev, qp);
1592 } else {
1593 qedr_cleanup_kernel_sq(dev, qp);
1594 qedr_cleanup_kernel_rq(dev, qp);
1595 }
1596
1597err0:
1598 kfree(qp);
1599
1600 return ERR_PTR(-EFAULT);
1601}
1602
1603static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604{
1605 switch (qp_state) {
1606 case QED_ROCE_QP_STATE_RESET:
1607 return IB_QPS_RESET;
1608 case QED_ROCE_QP_STATE_INIT:
1609 return IB_QPS_INIT;
1610 case QED_ROCE_QP_STATE_RTR:
1611 return IB_QPS_RTR;
1612 case QED_ROCE_QP_STATE_RTS:
1613 return IB_QPS_RTS;
1614 case QED_ROCE_QP_STATE_SQD:
1615 return IB_QPS_SQD;
1616 case QED_ROCE_QP_STATE_ERR:
1617 return IB_QPS_ERR;
1618 case QED_ROCE_QP_STATE_SQE:
1619 return IB_QPS_SQE;
1620 }
1621 return IB_QPS_ERR;
1622}
1623
1624static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1625 enum ib_qp_state qp_state)
1626{
1627 switch (qp_state) {
1628 case IB_QPS_RESET:
1629 return QED_ROCE_QP_STATE_RESET;
1630 case IB_QPS_INIT:
1631 return QED_ROCE_QP_STATE_INIT;
1632 case IB_QPS_RTR:
1633 return QED_ROCE_QP_STATE_RTR;
1634 case IB_QPS_RTS:
1635 return QED_ROCE_QP_STATE_RTS;
1636 case IB_QPS_SQD:
1637 return QED_ROCE_QP_STATE_SQD;
1638 case IB_QPS_ERR:
1639 return QED_ROCE_QP_STATE_ERR;
1640 default:
1641 return QED_ROCE_QP_STATE_ERR;
1642 }
1643}
1644
1645static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1646{
1647 qed_chain_reset(&qph->pbl);
1648 qph->prod = 0;
1649 qph->cons = 0;
1650 qph->wqe_cons = 0;
1651 qph->db_data.data.value = cpu_to_le16(0);
1652}
1653
1654static int qedr_update_qp_state(struct qedr_dev *dev,
1655 struct qedr_qp *qp,
1656 enum qed_roce_qp_state new_state)
1657{
1658 int status = 0;
1659
1660 if (new_state == qp->state)
1661 return 0;
1662
1663 switch (qp->state) {
1664 case QED_ROCE_QP_STATE_RESET:
1665 switch (new_state) {
1666 case QED_ROCE_QP_STATE_INIT:
1667 qp->prev_wqe_size = 0;
1668 qedr_reset_qp_hwq_info(&qp->sq);
1669 qedr_reset_qp_hwq_info(&qp->rq);
1670 break;
1671 default:
1672 status = -EINVAL;
1673 break;
1674 };
1675 break;
1676 case QED_ROCE_QP_STATE_INIT:
1677 switch (new_state) {
1678 case QED_ROCE_QP_STATE_RTR:
1679 /* Update doorbell (in case post_recv was
1680 * done before move to RTR)
1681 */
1682 wmb();
1683 writel(qp->rq.db_data.raw, qp->rq.db);
1684 /* Make sure write takes effect */
1685 mmiowb();
1686 break;
1687 case QED_ROCE_QP_STATE_ERR:
1688 break;
1689 default:
1690 /* Invalid state change. */
1691 status = -EINVAL;
1692 break;
1693 };
1694 break;
1695 case QED_ROCE_QP_STATE_RTR:
1696 /* RTR->XXX */
1697 switch (new_state) {
1698 case QED_ROCE_QP_STATE_RTS:
1699 break;
1700 case QED_ROCE_QP_STATE_ERR:
1701 break;
1702 default:
1703 /* Invalid state change. */
1704 status = -EINVAL;
1705 break;
1706 };
1707 break;
1708 case QED_ROCE_QP_STATE_RTS:
1709 /* RTS->XXX */
1710 switch (new_state) {
1711 case QED_ROCE_QP_STATE_SQD:
1712 break;
1713 case QED_ROCE_QP_STATE_ERR:
1714 break;
1715 default:
1716 /* Invalid state change. */
1717 status = -EINVAL;
1718 break;
1719 };
1720 break;
1721 case QED_ROCE_QP_STATE_SQD:
1722 /* SQD->XXX */
1723 switch (new_state) {
1724 case QED_ROCE_QP_STATE_RTS:
1725 case QED_ROCE_QP_STATE_ERR:
1726 break;
1727 default:
1728 /* Invalid state change. */
1729 status = -EINVAL;
1730 break;
1731 };
1732 break;
1733 case QED_ROCE_QP_STATE_ERR:
1734 /* ERR->XXX */
1735 switch (new_state) {
1736 case QED_ROCE_QP_STATE_RESET:
1737 if ((qp->rq.prod != qp->rq.cons) ||
1738 (qp->sq.prod != qp->sq.cons)) {
1739 DP_NOTICE(dev,
1740 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1741 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1742 qp->sq.cons);
1743 status = -EINVAL;
1744 }
1745 break;
1746 default:
1747 status = -EINVAL;
1748 break;
1749 };
1750 break;
1751 default:
1752 status = -EINVAL;
1753 break;
1754 };
1755
1756 return status;
1757}
1758
1759int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1760 int attr_mask, struct ib_udata *udata)
1761{
1762 struct qedr_qp *qp = get_qedr_qp(ibqp);
1763 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1764 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1765 enum ib_qp_state old_qp_state, new_qp_state;
1766 int rc = 0;
1767
1768 DP_DEBUG(dev, QEDR_MSG_QP,
1769 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1770 attr->qp_state);
1771
1772 old_qp_state = qedr_get_ibqp_state(qp->state);
1773 if (attr_mask & IB_QP_STATE)
1774 new_qp_state = attr->qp_state;
1775 else
1776 new_qp_state = old_qp_state;
1777
1778 if (!ib_modify_qp_is_ok
1779 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1780 IB_LINK_LAYER_ETHERNET)) {
1781 DP_ERR(dev,
1782 "modify qp: invalid attribute mask=0x%x specified for\n"
1783 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1784 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1785 new_qp_state);
1786 rc = -EINVAL;
1787 goto err;
1788 }
1789
1790 /* Translate the masks... */
1791 if (attr_mask & IB_QP_STATE) {
1792 SET_FIELD(qp_params.modify_flags,
1793 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1794 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1795 }
1796
1797 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1798 qp_params.sqd_async = true;
1799
1800 if (attr_mask & IB_QP_PKEY_INDEX) {
1801 SET_FIELD(qp_params.modify_flags,
1802 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1803 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1804 rc = -EINVAL;
1805 goto err;
1806 }
1807
1808 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1809 }
1810
1811 if (attr_mask & IB_QP_QKEY)
1812 qp->qkey = attr->qkey;
1813
1814 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1815 SET_FIELD(qp_params.modify_flags,
1816 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1817 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1818 IB_ACCESS_REMOTE_READ;
1819 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1820 IB_ACCESS_REMOTE_WRITE;
1821 qp_params.incoming_atomic_en = attr->qp_access_flags &
1822 IB_ACCESS_REMOTE_ATOMIC;
1823 }
1824
1825 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1826 if (attr_mask & IB_QP_PATH_MTU) {
1827 if (attr->path_mtu < IB_MTU_256 ||
1828 attr->path_mtu > IB_MTU_4096) {
1829 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1830 rc = -EINVAL;
1831 goto err;
1832 }
1833 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1834 ib_mtu_enum_to_int(iboe_get_mtu
1835 (dev->ndev->mtu)));
1836 }
1837
1838 if (!qp->mtu) {
1839 qp->mtu =
1840 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1841 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1842 }
1843
1844 SET_FIELD(qp_params.modify_flags,
1845 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1846
1847 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1848 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1849 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1850
1851 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1852
1853 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1854 if (rc) {
1855 DP_ERR(dev,
1856 "modify qp: problems with GID index %d (rc=%d)\n",
1857 attr->ah_attr.grh.sgid_index, rc);
1858 return rc;
1859 }
1860
1861 rc = qedr_get_dmac(dev, &attr->ah_attr,
1862 qp_params.remote_mac_addr);
1863 if (rc)
1864 return rc;
1865
1866 qp_params.use_local_mac = true;
1867 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1868
1869 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1870 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1871 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1872 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1873 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1874 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1875 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1876 qp_params.remote_mac_addr);
1877
1878 qp_params.mtu = qp->mtu;
1879 qp_params.lb_indication = false;
1880 }
1881
1882 if (!qp_params.mtu) {
1883 /* Stay with current MTU */
1884 if (qp->mtu)
1885 qp_params.mtu = qp->mtu;
1886 else
1887 qp_params.mtu =
1888 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1889 }
1890
1891 if (attr_mask & IB_QP_TIMEOUT) {
1892 SET_FIELD(qp_params.modify_flags,
1893 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1894
1895 qp_params.ack_timeout = attr->timeout;
1896 if (attr->timeout) {
1897 u32 temp;
1898
1899 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1900 /* FW requires [msec] */
1901 qp_params.ack_timeout = temp;
1902 } else {
1903 /* Infinite */
1904 qp_params.ack_timeout = 0;
1905 }
1906 }
1907 if (attr_mask & IB_QP_RETRY_CNT) {
1908 SET_FIELD(qp_params.modify_flags,
1909 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1910 qp_params.retry_cnt = attr->retry_cnt;
1911 }
1912
1913 if (attr_mask & IB_QP_RNR_RETRY) {
1914 SET_FIELD(qp_params.modify_flags,
1915 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1916 qp_params.rnr_retry_cnt = attr->rnr_retry;
1917 }
1918
1919 if (attr_mask & IB_QP_RQ_PSN) {
1920 SET_FIELD(qp_params.modify_flags,
1921 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1922 qp_params.rq_psn = attr->rq_psn;
1923 qp->rq_psn = attr->rq_psn;
1924 }
1925
1926 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1927 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1928 rc = -EINVAL;
1929 DP_ERR(dev,
1930 "unsupported max_rd_atomic=%d, supported=%d\n",
1931 attr->max_rd_atomic,
1932 dev->attr.max_qp_req_rd_atomic_resc);
1933 goto err;
1934 }
1935
1936 SET_FIELD(qp_params.modify_flags,
1937 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1938 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1939 }
1940
1941 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1942 SET_FIELD(qp_params.modify_flags,
1943 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1944 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1945 }
1946
1947 if (attr_mask & IB_QP_SQ_PSN) {
1948 SET_FIELD(qp_params.modify_flags,
1949 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1950 qp_params.sq_psn = attr->sq_psn;
1951 qp->sq_psn = attr->sq_psn;
1952 }
1953
1954 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1955 if (attr->max_dest_rd_atomic >
1956 dev->attr.max_qp_resp_rd_atomic_resc) {
1957 DP_ERR(dev,
1958 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1959 attr->max_dest_rd_atomic,
1960 dev->attr.max_qp_resp_rd_atomic_resc);
1961
1962 rc = -EINVAL;
1963 goto err;
1964 }
1965
1966 SET_FIELD(qp_params.modify_flags,
1967 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1968 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1969 }
1970
1971 if (attr_mask & IB_QP_DEST_QPN) {
1972 SET_FIELD(qp_params.modify_flags,
1973 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1974
1975 qp_params.dest_qp = attr->dest_qp_num;
1976 qp->dest_qp_num = attr->dest_qp_num;
1977 }
1978
1979 if (qp->qp_type != IB_QPT_GSI)
1980 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1981 qp->qed_qp, &qp_params);
1982
1983 if (attr_mask & IB_QP_STATE) {
1984 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02001985 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001986 qp->state = qp_params.new_state;
1987 }
1988
1989err:
1990 return rc;
1991}
1992
1993static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1994{
1995 int ib_qp_acc_flags = 0;
1996
1997 if (params->incoming_rdma_write_en)
1998 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1999 if (params->incoming_rdma_read_en)
2000 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2001 if (params->incoming_atomic_en)
2002 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2003 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2004 return ib_qp_acc_flags;
2005}
2006
2007int qedr_query_qp(struct ib_qp *ibqp,
2008 struct ib_qp_attr *qp_attr,
2009 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2010{
2011 struct qed_rdma_query_qp_out_params params;
2012 struct qedr_qp *qp = get_qedr_qp(ibqp);
2013 struct qedr_dev *dev = qp->dev;
2014 int rc = 0;
2015
2016 memset(&params, 0, sizeof(params));
2017
2018 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2019 if (rc)
2020 goto err;
2021
2022 memset(qp_attr, 0, sizeof(*qp_attr));
2023 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2024
2025 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2026 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002027 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002028 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2029 qp_attr->rq_psn = params.rq_psn;
2030 qp_attr->sq_psn = params.sq_psn;
2031 qp_attr->dest_qp_num = params.dest_qp;
2032
2033 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2034
2035 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2036 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2037 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2038 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002039 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002040 qp_init_attr->cap = qp_attr->cap;
2041
2042 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2043 sizeof(qp_attr->ah_attr.grh.dgid.raw));
2044
2045 qp_attr->ah_attr.grh.flow_label = params.flow_label;
2046 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2047 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2048 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2049
2050 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2051 qp_attr->ah_attr.port_num = 1;
2052 qp_attr->ah_attr.sl = 0;
2053 qp_attr->timeout = params.timeout;
2054 qp_attr->rnr_retry = params.rnr_retry;
2055 qp_attr->retry_cnt = params.retry_cnt;
2056 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2057 qp_attr->pkey_index = params.pkey_index;
2058 qp_attr->port_num = 1;
2059 qp_attr->ah_attr.src_path_bits = 0;
2060 qp_attr->ah_attr.static_rate = 0;
2061 qp_attr->alt_pkey_index = 0;
2062 qp_attr->alt_port_num = 0;
2063 qp_attr->alt_timeout = 0;
2064 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2065
2066 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2067 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2068 qp_attr->max_rd_atomic = params.max_rd_atomic;
2069 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2070
2071 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2072 qp_attr->cap.max_inline_data);
2073
2074err:
2075 return rc;
2076}
2077
2078int qedr_destroy_qp(struct ib_qp *ibqp)
2079{
2080 struct qedr_qp *qp = get_qedr_qp(ibqp);
2081 struct qedr_dev *dev = qp->dev;
2082 struct ib_qp_attr attr;
2083 int attr_mask = 0;
2084 int rc = 0;
2085
2086 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2087 qp, qp->qp_type);
2088
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002089 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2090 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2091 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2092
Ram Amranicecbcdd2016-10-10 13:15:34 +03002093 attr.qp_state = IB_QPS_ERR;
2094 attr_mask |= IB_QP_STATE;
2095
2096 /* Change the QP state to ERROR */
2097 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2098 }
2099
2100 if (qp->qp_type != IB_QPT_GSI) {
2101 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2102 if (rc)
2103 return rc;
Ram Amrani04886772016-10-10 13:15:38 +03002104 } else {
2105 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002106 }
2107
2108 if (ibqp->uobject && ibqp->uobject->context) {
2109 qedr_cleanup_user_sq(dev, qp);
2110 qedr_cleanup_user_rq(dev, qp);
2111 } else {
2112 qedr_cleanup_kernel_sq(dev, qp);
2113 qedr_cleanup_kernel_rq(dev, qp);
2114 }
2115
2116 kfree(qp);
2117
2118 return rc;
2119}
Ram Amranie0290cc2016-10-10 13:15:35 +03002120
Moni Shoua477864c2016-11-23 08:23:24 +02002121struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2122 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002123{
2124 struct qedr_ah *ah;
2125
2126 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2127 if (!ah)
2128 return ERR_PTR(-ENOMEM);
2129
2130 ah->attr = *attr;
2131
2132 return &ah->ibah;
2133}
2134
2135int qedr_destroy_ah(struct ib_ah *ibah)
2136{
2137 struct qedr_ah *ah = get_qedr_ah(ibah);
2138
2139 kfree(ah);
2140 return 0;
2141}
2142
Ram Amranie0290cc2016-10-10 13:15:35 +03002143static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2144{
2145 struct qedr_pbl *pbl, *tmp;
2146
2147 if (info->pbl_table)
2148 list_add_tail(&info->pbl_table->list_entry,
2149 &info->free_pbl_list);
2150
2151 if (!list_empty(&info->inuse_pbl_list))
2152 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2153
2154 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2155 list_del(&pbl->list_entry);
2156 qedr_free_pbl(dev, &info->pbl_info, pbl);
2157 }
2158}
2159
2160static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2161 size_t page_list_len, bool two_layered)
2162{
2163 struct qedr_pbl *tmp;
2164 int rc;
2165
2166 INIT_LIST_HEAD(&info->free_pbl_list);
2167 INIT_LIST_HEAD(&info->inuse_pbl_list);
2168
2169 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2170 page_list_len, two_layered);
2171 if (rc)
2172 goto done;
2173
2174 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2175 if (!info->pbl_table) {
2176 rc = -ENOMEM;
2177 goto done;
2178 }
2179
2180 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2181 &info->pbl_table->pa);
2182
2183	/* In the usual case we use 2 PBLs, so we add one to the free
2184	 * list and allocate another one.
2185	 */
2186 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2187 if (!tmp) {
2188 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2189 goto done;
2190 }
2191
2192 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2193
2194 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2195
2196done:
2197 if (rc)
2198 free_mr_info(dev, info);
2199
2200 return rc;
2201}
2202
2203struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2204 u64 usr_addr, int acc, struct ib_udata *udata)
2205{
2206 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2207 struct qedr_mr *mr;
2208 struct qedr_pd *pd;
2209 int rc = -ENOMEM;
2210
2211 pd = get_qedr_pd(ibpd);
2212 DP_DEBUG(dev, QEDR_MSG_MR,
2213 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2214 pd->pd_id, start, len, usr_addr, acc);
2215
2216 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2217 return ERR_PTR(-EINVAL);
2218
2219 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2220 if (!mr)
2221 return ERR_PTR(rc);
2222
2223 mr->type = QEDR_MR_USER;
2224
2225 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2226 if (IS_ERR(mr->umem)) {
2227 rc = -EFAULT;
2228 goto err0;
2229 }
2230
2231 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2232 if (rc)
2233 goto err1;
2234
2235 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2236 &mr->info.pbl_info);
2237
2238 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2239 if (rc) {
2240 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2241 goto err1;
2242 }
2243
2244 /* Index only, 18 bit long, lkey = itid << 8 | key */
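	/* e.g. (illustrative, not from the original source) itid 0x1234 with
	 * key 0 yields lkey 0x123400; the low byte of the lkey is the key.
	 */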
2245 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2246 mr->hw_mr.key = 0;
2247 mr->hw_mr.pd = pd->pd_id;
2248 mr->hw_mr.local_read = 1;
2249 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2250 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2251 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2252 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2253 mr->hw_mr.mw_bind = false;
2254 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2255 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2256 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2257 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2258 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2259 mr->hw_mr.length = len;
2260 mr->hw_mr.vaddr = usr_addr;
2261 mr->hw_mr.zbva = false;
2262 mr->hw_mr.phy_mr = false;
2263 mr->hw_mr.dma_mr = false;
2264
2265 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2266 if (rc) {
2267 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2268 goto err2;
2269 }
2270
2271 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2272 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2273 mr->hw_mr.remote_atomic)
2274 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2275
2276 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2277 mr->ibmr.lkey);
2278 return &mr->ibmr;
2279
2280err2:
2281 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2282err1:
2283 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2284err0:
2285 kfree(mr);
2286 return ERR_PTR(rc);
2287}
2288
2289int qedr_dereg_mr(struct ib_mr *ib_mr)
2290{
2291 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2292 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2293 int rc = 0;
2294
2295 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2296 if (rc)
2297 return rc;
2298
2299 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2300
2301 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2302 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2303
2304	/* It could be user-registered memory. */
2305 if (mr->umem)
2306 ib_umem_release(mr->umem);
2307
2308 kfree(mr);
2309
2310 return rc;
2311}
2312
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002313static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2314 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002315{
2316 struct qedr_pd *pd = get_qedr_pd(ibpd);
2317 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2318 struct qedr_mr *mr;
2319 int rc = -ENOMEM;
2320
2321 DP_DEBUG(dev, QEDR_MSG_MR,
2322 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2323 max_page_list_len);
2324
2325 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2326 if (!mr)
2327 return ERR_PTR(rc);
2328
2329 mr->dev = dev;
2330 mr->type = QEDR_MR_FRMR;
2331
2332 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2333 if (rc)
2334 goto err0;
2335
2336 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2337 if (rc) {
2338 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2339 goto err0;
2340 }
2341
2342 /* Index only, 18 bit long, lkey = itid << 8 | key */
2343 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2344 mr->hw_mr.key = 0;
2345 mr->hw_mr.pd = pd->pd_id;
2346 mr->hw_mr.local_read = 1;
2347 mr->hw_mr.local_write = 0;
2348 mr->hw_mr.remote_read = 0;
2349 mr->hw_mr.remote_write = 0;
2350 mr->hw_mr.remote_atomic = 0;
2351 mr->hw_mr.mw_bind = false;
2352 mr->hw_mr.pbl_ptr = 0;
2353 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2354 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2355 mr->hw_mr.fbo = 0;
2356 mr->hw_mr.length = 0;
2357 mr->hw_mr.vaddr = 0;
2358 mr->hw_mr.zbva = false;
2359 mr->hw_mr.phy_mr = true;
2360 mr->hw_mr.dma_mr = false;
2361
2362 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2363 if (rc) {
2364 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2365 goto err1;
2366 }
2367
2368 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2369 mr->ibmr.rkey = mr->ibmr.lkey;
2370
2371 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2372 return mr;
2373
2374err1:
2375 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2376err0:
2377 kfree(mr);
2378 return ERR_PTR(rc);
2379}
2380
2381struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2382 enum ib_mr_type mr_type, u32 max_num_sg)
2383{
2384 struct qedr_dev *dev;
2385 struct qedr_mr *mr;
2386
2387 if (mr_type != IB_MR_TYPE_MEM_REG)
2388 return ERR_PTR(-EINVAL);
2389
2390 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2391
2392 if (IS_ERR(mr))
2393 return ERR_PTR(-EINVAL);
2394
2395 dev = mr->dev;
2396
2397 return &mr->ibmr;
2398}
2399
2400static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2401{
2402 struct qedr_mr *mr = get_qedr_mr(ibmr);
2403 struct qedr_pbl *pbl_table;
2404 struct regpair *pbe;
2405 u32 pbes_in_page;
2406
2407 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2408		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2409 return -ENOMEM;
2410 }
2411
2412 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2413 mr->npages, addr);
2414
2415 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2416 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2417 pbe = (struct regpair *)pbl_table->va;
2418 pbe += mr->npages % pbes_in_page;
2419 pbe->lo = cpu_to_le32((u32)addr);
2420 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2421
2422 mr->npages++;
2423
2424 return 0;
2425}
2426
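/* Illustrative note (not from the original source): with
 * info->completed = 5 and info->completed_handled = 2, 'work' below is 2,
 * so up to two PBLs are moved from the in-use list back to the free list.
 * The "- 1" appears to hold back the PBL of the most recently completed
 * FMR, in case its invalidate has not been observed yet.
 */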
2427static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2428{
2429 int work = info->completed - info->completed_handled - 1;
2430
2431 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2432 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2433 struct qedr_pbl *pbl;
2434
2435		/* Free all page lists that can be freed (all the ones that
2436		 * were invalidated), under the assumption that if an FMR
2437		 * completed successfully, then any invalidate operation that
2438		 * preceded it has also completed.
2439 */
2440 pbl = list_first_entry(&info->inuse_pbl_list,
2441 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002442 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002443 info->completed_handled++;
2444 }
2445}
2446
2447int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2448 int sg_nents, unsigned int *sg_offset)
2449{
2450 struct qedr_mr *mr = get_qedr_mr(ibmr);
2451
2452 mr->npages = 0;
2453
2454 handle_completed_mrs(mr->dev, &mr->info);
2455 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2456}
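
/* Illustrative only, not part of this driver: a minimal sketch of the
 * fast-registration flow a kernel ULP might use, which ends up in
 * __qedr_alloc_mr(), qedr_map_mr_sg() and the IB_WR_REG_MR case of
 * qedr_post_send(). 'my_pd', 'my_qp', 'sgl' and 'nents' are hypothetical
 * placeholders.
 *
 *	struct ib_mr *mr = ib_alloc_mr(my_pd, IB_MR_TYPE_MEM_REG, 32);
 *	int mapped = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode	= IB_WR_REG_MR,
 *		.wr.send_flags	= IB_SEND_SIGNALED,
 *		.mr		= mr,
 *		.key		= mr->rkey,
 *		.access		= IB_ACCESS_LOCAL_WRITE |
 *				  IB_ACCESS_REMOTE_READ,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(my_qp, &reg_wr.wr, &bad_wr);
 */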
2457
2458struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2459{
2460 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2461 struct qedr_pd *pd = get_qedr_pd(ibpd);
2462 struct qedr_mr *mr;
2463 int rc;
2464
2465 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2466 if (!mr)
2467 return ERR_PTR(-ENOMEM);
2468
2469 mr->type = QEDR_MR_DMA;
2470
2471 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2472 if (rc) {
2473 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2474 goto err1;
2475 }
2476
2477 /* index only, 18 bit long, lkey = itid << 8 | key */
2478 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2479 mr->hw_mr.pd = pd->pd_id;
2480 mr->hw_mr.local_read = 1;
2481 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2482 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2483 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2484 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2485 mr->hw_mr.dma_mr = true;
2486
2487 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2488 if (rc) {
2489 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2490 goto err2;
2491 }
2492
2493 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2494 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2495 mr->hw_mr.remote_atomic)
2496 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2497
2498 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2499 return &mr->ibmr;
2500
2501err2:
2502 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2503err1:
2504 kfree(mr);
2505 return ERR_PTR(rc);
2506}
Ram Amraniafa0e132016-10-10 13:15:36 +03002507
2508static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2509{
2510 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2511}
2512
2513static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2514{
2515 int i, len = 0;
2516
2517 for (i = 0; i < num_sge; i++)
2518 len += sg_list[i].length;
2519
2520 return len;
2521}
2522
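/* Note added for clarity (not from the original source): on a little-endian
 * host cpu_to_le64() is a no-op and cpu_to_be64() byte-swaps, while on a
 * big-endian host cpu_to_le64() byte-swaps and cpu_to_be64() is a no-op, so
 * the combination below amounts to an unconditional 64-bit byte swap of each
 * inline-data segment before it is handed to the device.
 */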
2523static void swap_wqe_data64(u64 *p)
2524{
2525 int i;
2526
2527 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2528 *p = cpu_to_be64(cpu_to_le64(*p));
2529}
2530
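/* Illustrative note (not from the original source): inline data is copied
 * into whole chain elements of seg_siz == sizeof(struct rdma_sq_common_wqe)
 * bytes each, so a payload of data_size bytes consumes
 * DIV_ROUND_UP(data_size, seg_siz) extra elements and grows *wqe_size by the
 * same amount.
 */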
2531static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2532 struct qedr_qp *qp, u8 *wqe_size,
2533 struct ib_send_wr *wr,
2534 struct ib_send_wr **bad_wr, u8 *bits,
2535 u8 bit)
2536{
2537 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2538 char *seg_prt, *wqe;
2539 int i, seg_siz;
2540
2541 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2542 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2543 *bad_wr = wr;
2544 return 0;
2545 }
2546
2547 if (!data_size)
2548 return data_size;
2549
2550 *bits |= bit;
2551
2552 seg_prt = NULL;
2553 wqe = NULL;
2554 seg_siz = 0;
2555
2556 /* Copy data inline */
2557 for (i = 0; i < wr->num_sge; i++) {
2558 u32 len = wr->sg_list[i].length;
2559 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2560
2561 while (len > 0) {
2562 u32 cur;
2563
2564 /* New segment required */
2565 if (!seg_siz) {
2566 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2567 seg_prt = wqe;
2568 seg_siz = sizeof(struct rdma_sq_common_wqe);
2569 (*wqe_size)++;
2570 }
2571
2572 /* Calculate currently allowed length */
2573 cur = min_t(u32, len, seg_siz);
2574 memcpy(seg_prt, src, cur);
2575
2576 /* Update segment variables */
2577 seg_prt += cur;
2578 seg_siz -= cur;
2579
2580 /* Update sge variables */
2581 src += cur;
2582 len -= cur;
2583
2584 /* Swap fully-completed segments */
2585 if (!seg_siz)
2586 swap_wqe_data64((u64 *)wqe);
2587 }
2588 }
2589
2590 /* swap last not completed segment */
2591 if (seg_siz)
2592 swap_wqe_data64((u64 *)wqe);
2593
2594 return data_size;
2595}
2596
2597#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2598 do { \
2599		DMA_REGPAIR_LE((sge)->addr, vaddr);	\
2600 (sge)->length = cpu_to_le32(vlength); \
2601 (sge)->flags = cpu_to_le32(vflags); \
2602 } while (0)
2603
2604#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2605 do { \
2606		DMA_REGPAIR_LE((hdr)->wr_id, vwr_id);	\
2607 (hdr)->num_sges = num_sge; \
2608 } while (0)
2609
2610#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2611 do { \
2612		DMA_REGPAIR_LE((sge)->addr, vaddr);	\
2613 (sge)->length = cpu_to_le32(vlength); \
2614 (sge)->l_key = cpu_to_le32(vlkey); \
2615 } while (0)
2616
2617static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2618 struct ib_send_wr *wr)
2619{
2620 u32 data_size = 0;
2621 int i;
2622
2623 for (i = 0; i < wr->num_sge; i++) {
2624 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2625
2626 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2627 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2628 sge->length = cpu_to_le32(wr->sg_list[i].length);
2629 data_size += wr->sg_list[i].length;
2630 }
2631
2632 if (wqe_size)
2633 *wqe_size += wr->num_sge;
2634
2635 return data_size;
2636}
2637
2638static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2639 struct qedr_qp *qp,
2640 struct rdma_sq_rdma_wqe_1st *rwqe,
2641 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2642 struct ib_send_wr *wr,
2643 struct ib_send_wr **bad_wr)
2644{
2645 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2646 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2647
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002648 if (wr->send_flags & IB_SEND_INLINE &&
2649 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2650 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002651 u8 flags = 0;
2652
2653 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2654 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2655 bad_wr, &rwqe->flags, flags);
2656 }
2657
2658 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2659}
2660
2661static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2662 struct qedr_qp *qp,
2663 struct rdma_sq_send_wqe_1st *swqe,
2664 struct rdma_sq_send_wqe_2st *swqe2,
2665 struct ib_send_wr *wr,
2666 struct ib_send_wr **bad_wr)
2667{
2668 memset(swqe2, 0, sizeof(*swqe2));
2669 if (wr->send_flags & IB_SEND_INLINE) {
2670 u8 flags = 0;
2671
2672 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2673 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2674 bad_wr, &swqe->flags, flags);
2675 }
2676
2677 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2678}
2679
2680static int qedr_prepare_reg(struct qedr_qp *qp,
2681 struct rdma_sq_fmr_wqe_1st *fwqe1,
2682 struct ib_reg_wr *wr)
2683{
2684 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2685 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2686
2687 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2688 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2689 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2690 fwqe1->l_key = wr->key;
2691
2692 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2693 !!(wr->access & IB_ACCESS_REMOTE_READ));
2694 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2695 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2696 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2697 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2698 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2699 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2700 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2701 fwqe2->fmr_ctrl = 0;
2702
2703 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2704 ilog2(mr->ibmr.page_size) - 12);
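	/* e.g. (illustrative, not from the original source) a 4 KiB
	 * registration page size gives ilog2(4096) - 12 = 0.
	 */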
2705
2706 fwqe2->length_hi = 0;
2707 fwqe2->length_lo = mr->ibmr.length;
2708 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2709 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2710
2711 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2712
2713 return 0;
2714}
2715
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002716static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002717{
2718 switch (opcode) {
2719 case IB_WR_RDMA_WRITE:
2720 case IB_WR_RDMA_WRITE_WITH_IMM:
2721 return IB_WC_RDMA_WRITE;
2722 case IB_WR_SEND_WITH_IMM:
2723 case IB_WR_SEND:
2724 case IB_WR_SEND_WITH_INV:
2725 return IB_WC_SEND;
2726 case IB_WR_RDMA_READ:
2727 return IB_WC_RDMA_READ;
2728 case IB_WR_ATOMIC_CMP_AND_SWP:
2729 return IB_WC_COMP_SWAP;
2730 case IB_WR_ATOMIC_FETCH_AND_ADD:
2731 return IB_WC_FETCH_ADD;
2732 case IB_WR_REG_MR:
2733 return IB_WC_REG_MR;
2734 case IB_WR_LOCAL_INV:
2735 return IB_WC_LOCAL_INV;
2736 default:
2737 return IB_WC_SEND;
2738 }
2739}
2740
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002741static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002742{
2743 int wq_is_full, err_wr, pbl_is_full;
2744 struct qedr_dev *dev = qp->dev;
2745
2746 /* prevent SQ overflow and/or processing of a bad WR */
2747 err_wr = wr->num_sge > qp->sq.max_sges;
2748 wq_is_full = qedr_wq_is_full(&qp->sq);
2749 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2750 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2751 if (wq_is_full || err_wr || pbl_is_full) {
2752 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2753 DP_ERR(dev,
2754 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2755 qp);
2756 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2757 }
2758
2759 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2760 DP_ERR(dev,
2761 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2762 qp);
2763 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2764 }
2765
2766 if (pbl_is_full &&
2767 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2768 DP_ERR(dev,
2769 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2770 qp);
2771 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2772 }
2773 return false;
2774 }
2775 return true;
2776}
2777
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002778static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002779 struct ib_send_wr **bad_wr)
2780{
2781 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2782 struct qedr_qp *qp = get_qedr_qp(ibqp);
2783 struct rdma_sq_atomic_wqe_1st *awqe1;
2784 struct rdma_sq_atomic_wqe_2nd *awqe2;
2785 struct rdma_sq_atomic_wqe_3rd *awqe3;
2786 struct rdma_sq_send_wqe_2st *swqe2;
2787 struct rdma_sq_local_inv_wqe *iwqe;
2788 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2789 struct rdma_sq_send_wqe_1st *swqe;
2790 struct rdma_sq_rdma_wqe_1st *rwqe;
2791 struct rdma_sq_fmr_wqe_1st *fwqe1;
2792 struct rdma_sq_common_wqe *wqe;
2793 u32 length;
2794 int rc = 0;
2795 bool comp;
2796
2797 if (!qedr_can_post_send(qp, wr)) {
2798 *bad_wr = wr;
2799 return -ENOMEM;
2800 }
2801
2802 wqe = qed_chain_produce(&qp->sq.pbl);
2803 qp->wqe_wr_id[qp->sq.prod].signaled =
2804 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2805
2806 wqe->flags = 0;
2807 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2808 !!(wr->send_flags & IB_SEND_SOLICITED));
2809 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2810 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2811 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2812 !!(wr->send_flags & IB_SEND_FENCE));
2813 wqe->prev_wqe_size = qp->prev_wqe_size;
2814
2815 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2816
2817 switch (wr->opcode) {
2818 case IB_WR_SEND_WITH_IMM:
2819 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2820 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2821 swqe->wqe_size = 2;
2822 swqe2 = qed_chain_produce(&qp->sq.pbl);
2823
2824 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2825 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2826 wr, bad_wr);
2827 swqe->length = cpu_to_le32(length);
2828 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2829 qp->prev_wqe_size = swqe->wqe_size;
2830 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2831 break;
2832 case IB_WR_SEND:
2833 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2834 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2835
2836 swqe->wqe_size = 2;
2837 swqe2 = qed_chain_produce(&qp->sq.pbl);
2838 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2839 wr, bad_wr);
2840 swqe->length = cpu_to_le32(length);
2841 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2842 qp->prev_wqe_size = swqe->wqe_size;
2843 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2844 break;
2845 case IB_WR_SEND_WITH_INV:
2846 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2847 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2848 swqe2 = qed_chain_produce(&qp->sq.pbl);
2849 swqe->wqe_size = 2;
2850 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2851 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2852 wr, bad_wr);
2853 swqe->length = cpu_to_le32(length);
2854 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2855 qp->prev_wqe_size = swqe->wqe_size;
2856 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2857 break;
2858
2859 case IB_WR_RDMA_WRITE_WITH_IMM:
2860 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2861 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2862
2863 rwqe->wqe_size = 2;
2864 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2865 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2866 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2867 wr, bad_wr);
2868 rwqe->length = cpu_to_le32(length);
2869 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2870 qp->prev_wqe_size = rwqe->wqe_size;
2871 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2872 break;
2873 case IB_WR_RDMA_WRITE:
2874 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2875 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2876
2877 rwqe->wqe_size = 2;
2878 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2879 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2880 wr, bad_wr);
2881 rwqe->length = cpu_to_le32(length);
2882 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2883 qp->prev_wqe_size = rwqe->wqe_size;
2884 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2885 break;
2886 case IB_WR_RDMA_READ_WITH_INV:
2887 DP_ERR(dev,
2888 "RDMA READ WITH INVALIDATE not supported\n");
2889 *bad_wr = wr;
2890 rc = -EINVAL;
2891 break;
2892
2893 case IB_WR_RDMA_READ:
2894 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2895 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2896
2897 rwqe->wqe_size = 2;
2898 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2899 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2900 wr, bad_wr);
2901 rwqe->length = cpu_to_le32(length);
2902 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2903 qp->prev_wqe_size = rwqe->wqe_size;
2904 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2905 break;
2906
2907 case IB_WR_ATOMIC_CMP_AND_SWP:
2908 case IB_WR_ATOMIC_FETCH_AND_ADD:
2909 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2910 awqe1->wqe_size = 4;
2911
2912 awqe2 = qed_chain_produce(&qp->sq.pbl);
2913 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2914 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2915
2916 awqe3 = qed_chain_produce(&qp->sq.pbl);
2917
2918 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2919 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2920 DMA_REGPAIR_LE(awqe3->swap_data,
2921 atomic_wr(wr)->compare_add);
2922 } else {
2923 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2924 DMA_REGPAIR_LE(awqe3->swap_data,
2925 atomic_wr(wr)->swap);
2926 DMA_REGPAIR_LE(awqe3->cmp_data,
2927 atomic_wr(wr)->compare_add);
2928 }
2929
2930 qedr_prepare_sq_sges(qp, NULL, wr);
2931
2932 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2933 qp->prev_wqe_size = awqe1->wqe_size;
2934 break;
2935
2936 case IB_WR_LOCAL_INV:
2937 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2938 iwqe->wqe_size = 1;
2939
2940 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2941 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2942 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2943 qp->prev_wqe_size = iwqe->wqe_size;
2944 break;
2945 case IB_WR_REG_MR:
2946 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2947 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2948 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2949 fwqe1->wqe_size = 2;
2950
2951 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2952 if (rc) {
2953 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2954 *bad_wr = wr;
2955 break;
2956 }
2957
2958 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2959 qp->prev_wqe_size = fwqe1->wqe_size;
2960 break;
2961 default:
2962 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2963 rc = -EINVAL;
2964 *bad_wr = wr;
2965 break;
2966 }
2967
2968 if (*bad_wr) {
2969 u16 value;
2970
2971 /* Restore prod to its position before
2972 * this WR was processed
2973 */
2974 value = le16_to_cpu(qp->sq.db_data.data.value);
2975 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2976
2977 /* Restore prev_wqe_size */
2978 qp->prev_wqe_size = wqe->prev_wqe_size;
2979 rc = -EINVAL;
2980 DP_ERR(dev, "POST SEND FAILED\n");
2981 }
2982
2983 return rc;
2984}
2985
2986int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2987 struct ib_send_wr **bad_wr)
2988{
2989 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2990 struct qedr_qp *qp = get_qedr_qp(ibqp);
2991 unsigned long flags;
2992 int rc = 0;
2993
2994 *bad_wr = NULL;
2995
Ram Amrani04886772016-10-10 13:15:38 +03002996 if (qp->qp_type == IB_QPT_GSI)
2997 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2998
Ram Amraniafa0e132016-10-10 13:15:36 +03002999 spin_lock_irqsave(&qp->q_lock, flags);
3000
Amrani, Ram922d9a42016-12-22 14:40:38 +02003001 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3002 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3003 (qp->state != QED_ROCE_QP_STATE_SQD)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003004 spin_unlock_irqrestore(&qp->q_lock, flags);
3005 *bad_wr = wr;
3006 DP_DEBUG(dev, QEDR_MSG_CQ,
3007 "QP in wrong state! QP icid=0x%x state %d\n",
3008 qp->icid, qp->state);
3009 return -EINVAL;
3010 }
3011
Ram Amraniafa0e132016-10-10 13:15:36 +03003012 while (wr) {
3013 rc = __qedr_post_send(ibqp, wr, bad_wr);
3014 if (rc)
3015 break;
3016
3017 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3018
3019 qedr_inc_sw_prod(&qp->sq);
3020
3021 qp->sq.db_data.data.value++;
3022
3023 wr = wr->next;
3024 }
3025
3026 /* Trigger doorbell
3027 * If there was a failure in the first WR then it will be triggered in
3028	 * vain. However, this is not harmful (as long as the producer value is
3029 * unchanged). For performance reasons we avoid checking for this
3030 * redundant doorbell.
3031 */
3032 wmb();
3033 writel(qp->sq.db_data.raw, qp->sq.db);
3034
3035 /* Make sure write sticks */
3036 mmiowb();
3037
3038 spin_unlock_irqrestore(&qp->q_lock, flags);
3039
3040 return rc;
3041}
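
/* Illustrative only, not part of this driver: a minimal sketch of a kernel
 * ULP posting a signaled SEND that would reach qedr_post_send(), then
 * reaping the completion via qedr_poll_cq(). 'my_qp', 'my_cq', 'dma_addr'
 * and 'my_lkey' are hypothetical placeholders.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= 64,
 *		.lkey	= my_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= 0x1234,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	struct ib_wc wc;
 *
 *	if (!ib_post_send(my_qp, &wr, &bad_wr))
 *		while (ib_poll_cq(my_cq, 1, &wc) == 0)
 *			cpu_relax();
 */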
3042
3043int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3044 struct ib_recv_wr **bad_wr)
3045{
3046 struct qedr_qp *qp = get_qedr_qp(ibqp);
3047 struct qedr_dev *dev = qp->dev;
3048 unsigned long flags;
3049 int status = 0;
3050
Ram Amrani04886772016-10-10 13:15:38 +03003051 if (qp->qp_type == IB_QPT_GSI)
3052 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3053
Ram Amraniafa0e132016-10-10 13:15:36 +03003054 spin_lock_irqsave(&qp->q_lock, flags);
3055
Amrani, Ram922d9a42016-12-22 14:40:38 +02003056 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003057 spin_unlock_irqrestore(&qp->q_lock, flags);
3058 *bad_wr = wr;
3059 return -EINVAL;
3060 }
3061
3062 while (wr) {
3063 int i;
3064
3065 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3066 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3067 wr->num_sge > qp->rq.max_sges) {
3068 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3069 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3070 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3071 qp->rq.max_sges);
3072 status = -ENOMEM;
3073 *bad_wr = wr;
3074 break;
3075 }
3076 for (i = 0; i < wr->num_sge; i++) {
3077 u32 flags = 0;
3078 struct rdma_rq_sge *rqe =
3079 qed_chain_produce(&qp->rq.pbl);
3080
3081 /* First one must include the number
3082 * of SGE in the list
3083 */
3084 if (!i)
3085 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3086 wr->num_sge);
3087
3088 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3089 wr->sg_list[i].lkey);
3090
3091 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3092 wr->sg_list[i].length, flags);
3093 }
3094
3095		/* Special case of no SGEs. FW requires between 1-4 SGEs,
3096		 * so in this case we need to post 1 SGE with length zero; this
3097		 * is because an RDMA write with immediate consumes an RQ entry.
3098 */
3099 if (!wr->num_sge) {
3100 u32 flags = 0;
3101 struct rdma_rq_sge *rqe =
3102 qed_chain_produce(&qp->rq.pbl);
3103
3104 /* First one must include the number
3105 * of SGE in the list
3106 */
3107 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3108 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3109
3110 RQ_SGE_SET(rqe, 0, 0, flags);
3111 i = 1;
3112 }
3113
3114 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3115 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3116
3117 qedr_inc_sw_prod(&qp->rq);
3118
3119 /* Flush all the writes before signalling doorbell */
3120 wmb();
3121
3122 qp->rq.db_data.data.value++;
3123
3124 writel(qp->rq.db_data.raw, qp->rq.db);
3125
3126 /* Make sure write sticks */
3127 mmiowb();
3128
3129 wr = wr->next;
3130 }
3131
3132 spin_unlock_irqrestore(&qp->q_lock, flags);
3133
3134 return status;
3135}
3136
3137static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3138{
3139 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3140
3141 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3142 cq->pbl_toggle;
3143}
3144
3145static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3146{
3147 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3148 struct qedr_qp *qp;
3149
3150 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3151 resp_cqe->qp_handle.lo,
3152 u64);
3153 return qp;
3154}
3155
3156static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3157{
3158 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3159
3160 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3161}
3162
3163/* Return latest CQE (needs processing) */
3164static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3165{
3166 return cq->latest_cqe;
3167}
3168
3169/* For FMRs we need to increase the completed counter used by the FMR
3170 * algorithm that determines whether a PBL can be freed or not.
3171 * We need to do this whether or not the work request was signaled. For
3172 * this purpose we call this function from the condition that checks if a WR
3173 * should be skipped, to make sure we don't miss it (possibly this FMR
3174 * operation was not signaled).
3175 */
3176static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3177{
3178 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3179 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3180}
3181
3182static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3183 struct qedr_cq *cq, int num_entries,
3184 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3185 int force)
3186{
3187 u16 cnt = 0;
3188
3189 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3190 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3191 qedr_chk_if_fmr(qp);
3192 /* skip WC */
3193 goto next_cqe;
3194 }
3195
3196 /* fill WC */
3197 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003198 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003199 wc->wc_flags = 0;
3200 wc->src_qp = qp->id;
3201 wc->qp = &qp->ibqp;
3202
3203 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3204 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3205
3206 switch (wc->opcode) {
3207 case IB_WC_RDMA_WRITE:
3208 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3209 break;
3210 case IB_WC_COMP_SWAP:
3211 case IB_WC_FETCH_ADD:
3212 wc->byte_len = 8;
3213 break;
3214 case IB_WC_REG_MR:
3215 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3216 break;
3217 default:
3218 break;
3219 }
3220
3221 num_entries--;
3222 wc++;
3223 cnt++;
3224next_cqe:
3225 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3226 qed_chain_consume(&qp->sq.pbl);
3227 qedr_inc_sw_cons(&qp->sq);
3228 }
3229
3230 return cnt;
3231}
3232
3233static int qedr_poll_cq_req(struct qedr_dev *dev,
3234 struct qedr_qp *qp, struct qedr_cq *cq,
3235 int num_entries, struct ib_wc *wc,
3236 struct rdma_cqe_requester *req)
3237{
3238 int cnt = 0;
3239
3240 switch (req->status) {
3241 case RDMA_CQE_REQ_STS_OK:
3242 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3243 IB_WC_SUCCESS, 0);
3244 break;
3245 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003246 if (qp->state != QED_ROCE_QP_STATE_ERR)
3247 DP_ERR(dev,
3248 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3249 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003250 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003251 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003252 break;
3253 default:
3254		/* process all WQEs before the consumer */
3255 qp->state = QED_ROCE_QP_STATE_ERR;
3256 cnt = process_req(dev, qp, cq, num_entries, wc,
3257 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3258 wc += cnt;
3259 /* if we have extra WC fill it with actual error info */
3260 if (cnt < num_entries) {
3261 enum ib_wc_status wc_status;
3262
3263 switch (req->status) {
3264 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3265 DP_ERR(dev,
3266 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3267 cq->icid, qp->icid);
3268 wc_status = IB_WC_BAD_RESP_ERR;
3269 break;
3270 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3271 DP_ERR(dev,
3272 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3273 cq->icid, qp->icid);
3274 wc_status = IB_WC_LOC_LEN_ERR;
3275 break;
3276 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3277 DP_ERR(dev,
3278 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3279 cq->icid, qp->icid);
3280 wc_status = IB_WC_LOC_QP_OP_ERR;
3281 break;
3282 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3283 DP_ERR(dev,
3284 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3285 cq->icid, qp->icid);
3286 wc_status = IB_WC_LOC_PROT_ERR;
3287 break;
3288 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3289 DP_ERR(dev,
3290 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3291 cq->icid, qp->icid);
3292 wc_status = IB_WC_MW_BIND_ERR;
3293 break;
3294 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3295 DP_ERR(dev,
3296 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3297 cq->icid, qp->icid);
3298 wc_status = IB_WC_REM_INV_REQ_ERR;
3299 break;
3300 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3301 DP_ERR(dev,
3302 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3303 cq->icid, qp->icid);
3304 wc_status = IB_WC_REM_ACCESS_ERR;
3305 break;
3306 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3307 DP_ERR(dev,
3308 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3309 cq->icid, qp->icid);
3310 wc_status = IB_WC_REM_OP_ERR;
3311 break;
3312 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3313 DP_ERR(dev,
3314 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3315 cq->icid, qp->icid);
3316 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3317 break;
3318 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3319 DP_ERR(dev,
3320 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3321 cq->icid, qp->icid);
3322 wc_status = IB_WC_RETRY_EXC_ERR;
3323 break;
3324 default:
3325 DP_ERR(dev,
3326 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3327 cq->icid, qp->icid);
3328 wc_status = IB_WC_GENERAL_ERR;
3329 }
3330 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3331 wc_status, 1);
3332 }
3333 }
3334
3335 return cnt;
3336}
3337
3338static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3339 struct qedr_cq *cq, struct ib_wc *wc,
3340 struct rdma_cqe_responder *resp, u64 wr_id)
3341{
3342 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3343 u8 flags;
3344
3345 wc->opcode = IB_WC_RECV;
3346 wc->wc_flags = 0;
3347
3348 switch (resp->status) {
3349 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3350 wc_status = IB_WC_LOC_ACCESS_ERR;
3351 break;
3352 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3353 wc_status = IB_WC_LOC_LEN_ERR;
3354 break;
3355 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3356 wc_status = IB_WC_LOC_QP_OP_ERR;
3357 break;
3358 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3359 wc_status = IB_WC_LOC_PROT_ERR;
3360 break;
3361 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3362 wc_status = IB_WC_MW_BIND_ERR;
3363 break;
3364 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3365 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3366 break;
3367 case RDMA_CQE_RESP_STS_OK:
3368 wc_status = IB_WC_SUCCESS;
3369 wc->byte_len = le32_to_cpu(resp->length);
3370
3371 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3372
3373 if (flags == QEDR_RESP_RDMA_IMM)
3374 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3375
3376 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3377 wc->ex.imm_data =
3378 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3379 wc->wc_flags |= IB_WC_WITH_IMM;
3380 }
3381 break;
3382 default:
3383 wc->status = IB_WC_GENERAL_ERR;
3384 DP_ERR(dev, "Invalid CQE status detected\n");
3385 }
3386
3387 /* fill WC */
3388 wc->status = wc_status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003389 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003390 wc->src_qp = qp->id;
3391 wc->qp = &qp->ibqp;
3392 wc->wr_id = wr_id;
3393}
3394
3395static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3396 struct qedr_cq *cq, struct ib_wc *wc,
3397 struct rdma_cqe_responder *resp)
3398{
3399 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3400
3401 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3402
3403 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3404 qed_chain_consume(&qp->rq.pbl);
3405 qedr_inc_sw_cons(&qp->rq);
3406
3407 return 1;
3408}
3409
3410static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3411 int num_entries, struct ib_wc *wc, u16 hw_cons)
3412{
3413 u16 cnt = 0;
3414
3415 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3416 /* fill WC */
3417 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003418 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003419 wc->wc_flags = 0;
3420 wc->src_qp = qp->id;
3421 wc->byte_len = 0;
3422 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3423 wc->qp = &qp->ibqp;
3424 num_entries--;
3425 wc++;
3426 cnt++;
3427 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3428 qed_chain_consume(&qp->rq.pbl);
3429 qedr_inc_sw_cons(&qp->rq);
3430 }
3431
3432 return cnt;
3433}
3434
3435static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3436 struct rdma_cqe_responder *resp, int *update)
3437{
3438 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3439 consume_cqe(cq);
3440 *update |= 1;
3441 }
3442}
3443
3444static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3445 struct qedr_cq *cq, int num_entries,
3446 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3447 int *update)
3448{
3449 int cnt;
3450
3451 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3452 cnt = process_resp_flush(qp, cq, num_entries, wc,
3453 resp->rq_cons);
3454 try_consume_resp_cqe(cq, qp, resp, update);
3455 } else {
3456 cnt = process_resp_one(dev, qp, cq, wc, resp);
3457 consume_cqe(cq);
3458 *update |= 1;
3459 }
3460
3461 return cnt;
3462}
3463
3464static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3465 struct rdma_cqe_requester *req, int *update)
3466{
3467 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3468 consume_cqe(cq);
3469 *update |= 1;
3470 }
3471}
3472
3473int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3474{
3475 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3476 struct qedr_cq *cq = get_qedr_cq(ibcq);
3477 union rdma_cqe *cqe = cq->latest_cqe;
3478 u32 old_cons, new_cons;
3479 unsigned long flags;
3480 int update = 0;
3481 int done = 0;
3482
Ram Amrani04886772016-10-10 13:15:38 +03003483 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3484 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3485
Ram Amraniafa0e132016-10-10 13:15:36 +03003486 spin_lock_irqsave(&cq->cq_lock, flags);
3487 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3488 while (num_entries && is_valid_cqe(cq, cqe)) {
3489 struct qedr_qp *qp;
3490 int cnt = 0;
3491
3492 /* prevent speculative reads of any field of CQE */
3493 rmb();
3494
3495 qp = cqe_get_qp(cqe);
3496 if (!qp) {
3497 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3498 break;
3499 }
3500
3501 wc->qp = &qp->ibqp;
3502
3503 switch (cqe_get_type(cqe)) {
3504 case RDMA_CQE_TYPE_REQUESTER:
3505 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3506 &cqe->req);
3507 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3508 break;
3509 case RDMA_CQE_TYPE_RESPONDER_RQ:
3510 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3511 &cqe->resp, &update);
3512 break;
3513 case RDMA_CQE_TYPE_INVALID:
3514 default:
3515 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3516 cqe_get_type(cqe));
3517 }
3518 num_entries -= cnt;
3519 wc += cnt;
3520 done += cnt;
3521
3522 cqe = get_cqe(cq);
3523 }
3524 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3525
3526 cq->cq_cons += new_cons - old_cons;
3527
3528 if (update)
3529		/* doorbell notifies about the latest VALID entry,
3530		 * but the chain already points to the next INVALID one
3531 */
3532 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3533
3534 spin_unlock_irqrestore(&cq->cq_lock, flags);
3535 return done;
3536}
Ram Amrani993d1b52016-10-10 13:15:39 +03003537
3538int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3539 u8 port_num,
3540 const struct ib_wc *in_wc,
3541 const struct ib_grh *in_grh,
3542 const struct ib_mad_hdr *mad_hdr,
3543 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3544 size_t *out_mad_size, u16 *out_mad_pkey_index)
3545{
3546 struct qedr_dev *dev = get_qedr_dev(ibdev);
3547
3548 DP_DEBUG(dev, QEDR_MSG_GSI,
3549 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3550 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3551 mad_hdr->class_specific, mad_hdr->class_version,
3552 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3553 return IB_MAD_RESULT_SUCCESS;
3554}
3555
3556int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3557 struct ib_port_immutable *immutable)
3558{
3559 struct ib_port_attr attr;
3560 int err;
3561
3562 err = qedr_query_port(ibdev, port_num, &attr);
3563 if (err)
3564 return err;
3565
3566 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3567 immutable->gid_tbl_len = attr.gid_tbl_len;
3568 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3569 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3570 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3571
3572 return 0;
3573}