 1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
46#include "qedr_hsi.h"
47#include <linux/qed/qed_if.h>
48#include "qedr.h"
49#include "verbs.h"
50#include <rdma/qedr-abi.h>
 51#include "qedr_cm.h"
 52
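/* Convert a DQ_PWM_OFFSET_* doorbell offset into the corresponding byte
 * offset within the doorbell BAR.
 */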
 53#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
54
55int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
56{
57 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
58 return -EINVAL;
59
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
61 return 0;
62}
63
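/* Return the cached GID at @index for @port; a zero GID is returned while
 * the cache entry is still being populated.
 */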
 64int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
65 union ib_gid *sgid)
66{
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
68 int rc = 0;
69
70 if (!rdma_cap_roce_gid_table(ibdev, port))
71 return -ENODEV;
72
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
74 if (rc == -EAGAIN) {
75 memcpy(sgid, &zgid, sizeof(*sgid));
76 return 0;
77 }
78
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
81
82 return rc;
83}
84
85int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
88{
89 if (!rdma_cap_roce_gid_table(device, port_num))
90 return -EINVAL;
91
92 if (port_num > QEDR_MAX_PORT)
93 return -EINVAL;
94
95 if (!context)
96 return -EINVAL;
97
98 return 0;
99}
100
101int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
103{
104 if (!rdma_cap_roce_gid_table(device, port_num))
105 return -EINVAL;
106
107 if (port_num > QEDR_MAX_PORT)
108 return -EINVAL;
109
110 if (!context)
111 return -EINVAL;
112
113 return 0;
114}
115
116int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
118{
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
121
122 if (!dev->rdma_ctx) {
123 DP_ERR(dev,
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
125 dev->rdma_ctx);
126 return -EINVAL;
127 }
128
129 memset(attr, 0, sizeof(*attr));
130
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
143
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
159
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
163
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
168
169 return 0;
170}
171
172#define QEDR_SPEED_SDR (1)
173#define QEDR_SPEED_DDR (2)
174#define QEDR_SPEED_QDR (4)
175#define QEDR_SPEED_FDR10 (8)
176#define QEDR_SPEED_FDR (16)
177#define QEDR_SPEED_EDR (32)
178
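/* Map an Ethernet link speed (in Mbps) onto the closest IB speed/width pair. */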
179static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
180 u8 *ib_width)
181{
182 switch (speed) {
183 case 1000:
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
186 break;
187 case 10000:
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
190 break;
191
192 case 20000:
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
195 break;
196
197 case 25000:
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
200 break;
201
202 case 40000:
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
205 break;
206
207 case 50000:
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
210 break;
211
212 case 100000:
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
215 break;
216
217 default:
218 /* Unsupported */
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
221 }
222}
223
224int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
225{
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
228
229 dev = get_qedr_dev(ibdev);
230 if (port > 1) {
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
232 return -EINVAL;
233 }
234
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
237 return -EINVAL;
238 }
239
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241 memset(attr, 0, sizeof(*attr));
242
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
246 } else {
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
249 }
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
252 attr->lid = 0;
253 attr->lmc = 0;
254 attr->sm_lid = 0;
255 attr->sm_sl = 0;
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
265
266 return 0;
267}
268
269int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
271{
272 struct qedr_dev *dev;
273
274 dev = get_qedr_dev(ibdev);
275 if (port > 1) {
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
277 return -EINVAL;
278 }
279
280 return 0;
281}
282
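/* Remember a physical range exposed to user space so that qedr_mmap() can
 * later validate the requested offset and length against it.
 */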
283static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
284 unsigned long len)
285{
286 struct qedr_mm *mm;
287
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
289 if (!mm)
290 return -ENOMEM;
291
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to PAGE_SIZE.
299 */
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
302
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
306
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
311
312 return 0;
313}
314
315static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
316 unsigned long len)
317{
318 bool found = false;
319 struct qedr_mm *mm;
320
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
324 continue;
325
326 found = true;
327 break;
328 }
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
333
334 return found;
335}
336
337struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
339{
340 int rc;
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
345
346 if (!udata)
347 return ERR_PTR(-EFAULT);
348
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
350 if (!ctx)
351 return ERR_PTR(-ENOMEM);
352
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
354 if (rc) {
355 DP_ERR(dev,
 356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
357 rc);
358 goto err;
359 }
360
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
367
368 memset(&uresp, 0, sizeof(uresp));
369
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
379
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
381 if (rc)
382 goto err;
383
384 ctx->dev = dev;
385
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387 if (rc)
388 goto err;
389
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
391 &ctx->ibucontext);
392 return &ctx->ibucontext;
393
394err:
395 kfree(ctx);
396 return ERR_PTR(rc);
397}
398
399int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
400{
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
403 int status = 0;
404
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
406 uctx);
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
408
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
414 kfree(mm);
415 }
416
417 kfree(uctx);
418 return status;
419}
420
421int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
422{
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
428 int rc = 0;
429 bool found;
430
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
436 vma->vm_start);
437 return -EINVAL;
438 }
439
440 found = qedr_search_mmap(ucontext, vm_page, len);
441 if (!found) {
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
443 vma->vm_pgoff);
444 return -EINVAL;
445 }
446
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
448
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
450 dev->db_size))) {
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
454 return -EPERM;
455 }
456
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
458
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
461 } else {
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
465 }
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
467 return rc;
468}
 469
470struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
 474 struct qedr_pd *pd;
475 u16 pd_id;
476 int rc;
477
478 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
479 (udata && context) ? "User Lib" : "Kernel");
480
481 if (!dev->rdma_ctx) {
 482 DP_ERR(dev, "invalid RDMA context\n");
483 return ERR_PTR(-EINVAL);
484 }
485
486 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
487 if (!pd)
488 return ERR_PTR(-ENOMEM);
489
 490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
491 if (rc)
492 goto err;
 493
 494 pd->pd_id = pd_id;
495
496 if (udata && context) {
 497 struct qedr_alloc_pd_uresp uresp;
498
499 uresp.pd_id = pd_id;
500
 501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 502 if (rc) {
 503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
 504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
505 goto err;
506 }
507
508 pd->uctx = get_qedr_ucontext(context);
509 pd->uctx->pd = pd;
 510 }
511
512 return &pd->ibpd;
 513
514err:
515 kfree(pd);
516 return ERR_PTR(rc);
 517}
518
519int qedr_dealloc_pd(struct ib_pd *ibpd)
520{
521 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
522 struct qedr_pd *pd = get_qedr_pd(ibpd);
523
524 if (!pd)
525 pr_err("Invalid PD received in dealloc_pd\n");
526
527 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
528 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
529
530 kfree(pd);
531
532 return 0;
533}
534
535static void qedr_free_pbl(struct qedr_dev *dev,
536 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
537{
538 struct pci_dev *pdev = dev->pdev;
539 int i;
540
541 for (i = 0; i < pbl_info->num_pbls; i++) {
542 if (!pbl[i].va)
543 continue;
544 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
545 pbl[i].va, pbl[i].pa);
546 }
547
548 kfree(pbl);
549}
550
551#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
552#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
553
554#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
555#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
556#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
557
558static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
559 struct qedr_pbl_info *pbl_info,
560 gfp_t flags)
561{
562 struct pci_dev *pdev = dev->pdev;
563 struct qedr_pbl *pbl_table;
564 dma_addr_t *pbl_main_tbl;
565 dma_addr_t pa;
566 void *va;
567 int i;
568
569 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
570 if (!pbl_table)
571 return ERR_PTR(-ENOMEM);
572
573 for (i = 0; i < pbl_info->num_pbls; i++) {
574 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
575 &pa, flags);
576 if (!va)
577 goto err;
578
579 memset(va, 0, pbl_info->pbl_size);
580 pbl_table[i].va = va;
581 pbl_table[i].pa = pa;
582 }
583
 584 /* Two-layer PBLs: if we have more than one pbl, we need to initialize
 585 * the first one with physical pointers to all of the rest.
586 */
587 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
588 for (i = 0; i < pbl_info->num_pbls - 1; i++)
589 pbl_main_tbl[i] = pbl_table[i + 1].pa;
590
591 return pbl_table;
592
593err:
594 for (i--; i >= 0; i--)
595 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
596 pbl_table[i].va, pbl_table[i].pa);
597
598 qedr_free_pbl(dev, pbl_info, pbl_table);
599
600 return ERR_PTR(-ENOMEM);
601}
602
603static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
604 struct qedr_pbl_info *pbl_info,
605 u32 num_pbes, int two_layer_capable)
606{
607 u32 pbl_capacity;
608 u32 pbl_size;
609 u32 num_pbls;
610
611 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
612 if (num_pbes > MAX_PBES_TWO_LAYER) {
613 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
614 num_pbes);
615 return -EINVAL;
616 }
617
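/* Example: with the minimum 4K PBL page, each page holds 4096 / 8 = 512
 * PBEs, so two layers can describe up to 512 * 512 pages.
 */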
618 /* calculate required pbl page size */
619 pbl_size = MIN_FW_PBL_PAGE_SIZE;
620 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
621 NUM_PBES_ON_PAGE(pbl_size);
622
623 while (pbl_capacity < num_pbes) {
624 pbl_size *= 2;
625 pbl_capacity = pbl_size / sizeof(u64);
626 pbl_capacity = pbl_capacity * pbl_capacity;
627 }
628
629 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
 630 num_pbls++; /* One for the layer0 (points to the pbls) */
631 pbl_info->two_layered = true;
632 } else {
633 /* One layered PBL */
634 num_pbls = 1;
635 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
636 roundup_pow_of_two((num_pbes * sizeof(u64))));
637 pbl_info->two_layered = false;
638 }
639
640 pbl_info->num_pbls = num_pbls;
641 pbl_info->pbl_size = pbl_size;
642 pbl_info->num_pbes = num_pbes;
643
644 DP_DEBUG(dev, QEDR_MSG_MR,
645 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
646 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
647
648 return 0;
649}
650
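/* Write the DMA address of every page in the umem into the PBL, moving on
 * to the next PBL page as each one fills up (for two-layer PBLs the
 * layer-0 table at pbl[0] is skipped).
 */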
651static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
652 struct qedr_pbl *pbl,
653 struct qedr_pbl_info *pbl_info)
654{
655 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
656 struct qedr_pbl *pbl_tbl;
657 struct scatterlist *sg;
658 struct regpair *pbe;
659 int entry;
660 u32 addr;
661
662 if (!pbl_info->num_pbes)
663 return;
664
665 /* If we have a two layered pbl, the first pbl points to the rest
 666 * of the pbls and the first entry lies in the second pbl of the table
667 */
668 if (pbl_info->two_layered)
669 pbl_tbl = &pbl[1];
670 else
671 pbl_tbl = pbl;
672
673 pbe = (struct regpair *)pbl_tbl->va;
674 if (!pbe) {
675 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
676 return;
677 }
678
679 pbe_cnt = 0;
680
681 shift = ilog2(umem->page_size);
682
683 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
684 pages = sg_dma_len(sg) >> shift;
685 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
686 /* store the page address in pbe */
687 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
688 umem->page_size * pg_cnt);
689 addr = upper_32_bits(sg_dma_address(sg) +
690 umem->page_size * pg_cnt);
691 pbe->hi = cpu_to_le32(addr);
692 pbe_cnt++;
693 total_num_pbes++;
694 pbe++;
695
696 if (total_num_pbes == pbl_info->num_pbes)
697 return;
698
 699 /* If the given pbl is full of pbes,
 700 * move to the next pbl.
701 */
702 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
703 pbl_tbl++;
704 pbe = (struct regpair *)pbl_tbl->va;
705 pbe_cnt = 0;
706 }
707 }
708 }
709}
710
711static int qedr_copy_cq_uresp(struct qedr_dev *dev,
712 struct qedr_cq *cq, struct ib_udata *udata)
713{
714 struct qedr_create_cq_uresp uresp;
715 int rc;
716
717 memset(&uresp, 0, sizeof(uresp));
718
719 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
720 uresp.icid = cq->icid;
721
722 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
723 if (rc)
724 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
725
726 return rc;
727}
728
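/* Advance the CQ consumer; the expected toggle bit is flipped each time
 * consumption wraps past the last CQE in the chain.
 */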
729static void consume_cqe(struct qedr_cq *cq)
730{
731 if (cq->latest_cqe == cq->toggle_cqe)
732 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
733
734 cq->latest_cqe = qed_chain_consume(&cq->pbl);
735}
736
737static inline int qedr_align_cq_entries(int entries)
738{
739 u64 size, aligned_size;
740
741 /* We allocate an extra entry that we don't report to the FW. */
742 size = (entries + 1) * QEDR_CQE_SIZE;
743 aligned_size = ALIGN(size, PAGE_SIZE);
744
745 return aligned_size / QEDR_CQE_SIZE;
746}
747
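/* Pin a user-space queue buffer and build the PBL that describes it to the
 * firmware.
 */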
748static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
749 struct qedr_dev *dev,
750 struct qedr_userq *q,
751 u64 buf_addr, size_t buf_len,
752 int access, int dmasync)
753{
754 int page_cnt;
755 int rc;
756
757 q->buf_addr = buf_addr;
758 q->buf_len = buf_len;
759 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
760 if (IS_ERR(q->umem)) {
761 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
762 PTR_ERR(q->umem));
763 return PTR_ERR(q->umem);
764 }
765
766 page_cnt = ib_umem_page_count(q->umem);
767 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
768 if (rc)
769 goto err0;
770
771 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
772 if (IS_ERR_OR_NULL(q->pbl_tbl))
773 goto err0;
774
775 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
776
777 return 0;
778
779err0:
780 ib_umem_release(q->umem);
781
782 return rc;
783}
784
785static inline void qedr_init_cq_params(struct qedr_cq *cq,
786 struct qedr_ucontext *ctx,
787 struct qedr_dev *dev, int vector,
788 int chain_entries, int page_cnt,
789 u64 pbl_ptr,
790 struct qed_rdma_create_cq_in_params
791 *params)
792{
793 memset(params, 0, sizeof(*params));
794 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
795 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
796 params->cnq_id = vector;
797 params->cq_size = chain_entries - 1;
798 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
799 params->pbl_num_pages = page_cnt;
800 params->pbl_ptr = pbl_ptr;
801 params->pbl_two_level = 0;
802}
803
804static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
805{
806 /* Flush data before signalling doorbell */
807 wmb();
808 cq->db.data.agg_flags = flags;
809 cq->db.data.value = cpu_to_le32(cons);
810 writeq(cq->db.raw, cq->db_addr);
811
812 /* Make sure write would stick */
813 mmiowb();
814}
815
816int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
817{
818 struct qedr_cq *cq = get_qedr_cq(ibcq);
819 unsigned long sflags;
820
821 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
822 return 0;
823
824 spin_lock_irqsave(&cq->cq_lock, sflags);
825
826 cq->arm_flags = 0;
827
828 if (flags & IB_CQ_SOLICITED)
829 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
830
831 if (flags & IB_CQ_NEXT_COMP)
832 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
833
834 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
835
836 spin_unlock_irqrestore(&cq->cq_lock, sflags);
837
838 return 0;
839}
840
841struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
842 const struct ib_cq_init_attr *attr,
843 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
844{
845 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
846 struct qed_rdma_destroy_cq_out_params destroy_oparams;
847 struct qed_rdma_destroy_cq_in_params destroy_iparams;
848 struct qedr_dev *dev = get_qedr_dev(ibdev);
849 struct qed_rdma_create_cq_in_params params;
850 struct qedr_create_cq_ureq ureq;
851 int vector = attr->comp_vector;
852 int entries = attr->cqe;
853 struct qedr_cq *cq;
854 int chain_entries;
855 int page_cnt;
856 u64 pbl_ptr;
857 u16 icid;
858 int rc;
859
860 DP_DEBUG(dev, QEDR_MSG_INIT,
861 "create_cq: called from %s. entries=%d, vector=%d\n",
862 udata ? "User Lib" : "Kernel", entries, vector);
863
864 if (entries > QEDR_MAX_CQES) {
865 DP_ERR(dev,
866 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
867 entries, QEDR_MAX_CQES);
868 return ERR_PTR(-EINVAL);
869 }
870
871 chain_entries = qedr_align_cq_entries(entries);
872 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
873
874 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
875 if (!cq)
876 return ERR_PTR(-ENOMEM);
877
878 if (udata) {
879 memset(&ureq, 0, sizeof(ureq));
880 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
881 DP_ERR(dev,
882 "create cq: problem copying data from user space\n");
883 goto err0;
884 }
885
886 if (!ureq.len) {
887 DP_ERR(dev,
888 "create cq: cannot create a cq with 0 entries\n");
889 goto err0;
890 }
891
892 cq->cq_type = QEDR_CQ_TYPE_USER;
893
894 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
895 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
896 if (rc)
897 goto err0;
898
899 pbl_ptr = cq->q.pbl_tbl->pa;
900 page_cnt = cq->q.pbl_info.num_pbes;
901 } else {
902 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
903
904 rc = dev->ops->common->chain_alloc(dev->cdev,
905 QED_CHAIN_USE_TO_CONSUME,
906 QED_CHAIN_MODE_PBL,
907 QED_CHAIN_CNT_TYPE_U32,
908 chain_entries,
909 sizeof(union rdma_cqe),
910 &cq->pbl);
911 if (rc)
912 goto err1;
913
914 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
915 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
916 }
917
918 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
919 pbl_ptr, &params);
920
921 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
922 if (rc)
923 goto err2;
924
925 cq->icid = icid;
926 cq->sig = QEDR_CQ_MAGIC_NUMBER;
927 spin_lock_init(&cq->cq_lock);
928
929 if (ib_ctx) {
930 rc = qedr_copy_cq_uresp(dev, cq, udata);
931 if (rc)
932 goto err3;
933 } else {
934 /* Generate doorbell address. */
935 cq->db_addr = dev->db_addr +
936 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
937 cq->db.data.icid = cq->icid;
938 cq->db.data.params = DB_AGG_CMD_SET <<
939 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
940
941 /* point to the very last element, passing it we will toggle */
942 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
943 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
944 cq->latest_cqe = NULL;
945 consume_cqe(cq);
946 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
947 }
948
949 DP_DEBUG(dev, QEDR_MSG_CQ,
950 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
951 cq->icid, cq, params.cq_size);
952
953 return &cq->ibcq;
954
955err3:
956 destroy_iparams.icid = cq->icid;
957 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
958 &destroy_oparams);
959err2:
960 if (udata)
961 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
962 else
963 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
964err1:
965 if (udata)
966 ib_umem_release(cq->q.umem);
967err0:
968 kfree(cq);
969 return ERR_PTR(-EINVAL);
970}
971
972int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
973{
974 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
975 struct qedr_cq *cq = get_qedr_cq(ibcq);
976
977 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
978
979 return 0;
980}
981
982int qedr_destroy_cq(struct ib_cq *ibcq)
983{
984 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
985 struct qed_rdma_destroy_cq_out_params oparams;
986 struct qed_rdma_destroy_cq_in_params iparams;
987 struct qedr_cq *cq = get_qedr_cq(ibcq);
988
989 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
990
 991 /* GSI CQs are handled by the driver, so they don't exist in the FW */
992 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
993 iparams.icid = cq->icid;
994 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
995 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
996 }
997
998 if (ibcq->uobject && ibcq->uobject->context) {
999 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1000 ib_umem_release(cq->q.umem);
1001 }
1002
1003 kfree(cq);
1004
1005 return 0;
1006}
 1007
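/* Resolve the source/destination GIDs, VLAN and RoCE version (v1, v2/IPv4
 * or v2/IPv6) for a modify-QP address vector from the GID cache.
 */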
1008static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1009 struct ib_qp_attr *attr,
1010 int attr_mask,
1011 struct qed_rdma_modify_qp_in_params
1012 *qp_params)
1013{
1014 enum rdma_network_type nw_type;
1015 struct ib_gid_attr gid_attr;
1016 union ib_gid gid;
1017 u32 ipv4_addr;
1018 int rc = 0;
1019 int i;
1020
1021 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1022 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1023 if (rc)
1024 return rc;
1025
1026 if (!memcmp(&gid, &zgid, sizeof(gid)))
1027 return -ENOENT;
1028
1029 if (gid_attr.ndev) {
1030 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1031
1032 dev_put(gid_attr.ndev);
1033 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1034 switch (nw_type) {
1035 case RDMA_NETWORK_IPV6:
1036 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1037 sizeof(qp_params->sgid));
1038 memcpy(&qp_params->dgid.bytes[0],
1039 &attr->ah_attr.grh.dgid,
1040 sizeof(qp_params->dgid));
1041 qp_params->roce_mode = ROCE_V2_IPV6;
1042 SET_FIELD(qp_params->modify_flags,
1043 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1044 break;
1045 case RDMA_NETWORK_IB:
1046 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1047 sizeof(qp_params->sgid));
1048 memcpy(&qp_params->dgid.bytes[0],
1049 &attr->ah_attr.grh.dgid,
1050 sizeof(qp_params->dgid));
1051 qp_params->roce_mode = ROCE_V1;
1052 break;
1053 case RDMA_NETWORK_IPV4:
1054 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1055 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1056 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1057 qp_params->sgid.ipv4_addr = ipv4_addr;
1058 ipv4_addr =
1059 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1060 qp_params->dgid.ipv4_addr = ipv4_addr;
1061 SET_FIELD(qp_params->modify_flags,
1062 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1063 qp_params->roce_mode = ROCE_V2_IPV4;
1064 break;
1065 }
1066 }
1067
1068 for (i = 0; i < 4; i++) {
1069 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1070 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1071 }
1072
1073 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1074 qp_params->vlan_id = 0;
1075
1076 return 0;
1077}
1078
1079static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1080{
1081 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1082 ib_umem_release(qp->usq.umem);
1083}
1084
1085static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1086{
1087 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1088 ib_umem_release(qp->urq.umem);
1089}
1090
1091static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1092{
1093 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1094 kfree(qp->wqe_wr_id);
1095}
1096
1097static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1098{
1099 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1100 kfree(qp->rqe_wr_id);
1101}
1102
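/* Sanity-check the requested QP type and capabilities against the device
 * limits before any resources are allocated.
 */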
1103static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1104 struct ib_qp_init_attr *attrs)
1105{
1106 struct qedr_device_attr *qattr = &dev->attr;
1107
1108 /* QP0... attrs->qp_type == IB_QPT_GSI */
1109 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1110 DP_DEBUG(dev, QEDR_MSG_QP,
1111 "create qp: unsupported qp type=0x%x requested\n",
1112 attrs->qp_type);
1113 return -EINVAL;
1114 }
1115
1116 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1117 DP_ERR(dev,
1118 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1119 attrs->cap.max_send_wr, qattr->max_sqe);
1120 return -EINVAL;
1121 }
1122
1123 if (attrs->cap.max_inline_data > qattr->max_inline) {
1124 DP_ERR(dev,
1125 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1126 attrs->cap.max_inline_data, qattr->max_inline);
1127 return -EINVAL;
1128 }
1129
1130 if (attrs->cap.max_send_sge > qattr->max_sge) {
1131 DP_ERR(dev,
1132 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1133 attrs->cap.max_send_sge, qattr->max_sge);
1134 return -EINVAL;
1135 }
1136
1137 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1138 DP_ERR(dev,
1139 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1140 attrs->cap.max_recv_sge, qattr->max_sge);
1141 return -EINVAL;
1142 }
1143
1144 /* Unprivileged user space cannot create special QP */
1145 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1146 DP_ERR(dev,
1147 "create qp: userspace can't create special QPs of type=0x%x\n",
1148 attrs->qp_type);
1149 return -EINVAL;
1150 }
1151
1152 return 0;
1153}
1154
1155static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1156 struct qedr_qp *qp)
1157{
1158 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1159 uresp->rq_icid = qp->icid;
1160}
1161
1162static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1163 struct qedr_qp *qp)
1164{
1165 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1166 uresp->sq_icid = qp->icid + 1;
1167}
1168
1169static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1170 struct qedr_qp *qp, struct ib_udata *udata)
1171{
1172 struct qedr_create_qp_uresp uresp;
1173 int rc;
1174
1175 memset(&uresp, 0, sizeof(uresp));
1176 qedr_copy_sq_uresp(&uresp, qp);
1177 qedr_copy_rq_uresp(&uresp, qp);
1178
1179 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1180 uresp.qp_id = qp->qp_id;
1181
1182 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1183 if (rc)
1184 DP_ERR(dev,
1185 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1186 qp->icid);
1187
1188 return rc;
1189}
1190
1191static void qedr_set_qp_init_params(struct qedr_dev *dev,
1192 struct qedr_qp *qp,
1193 struct qedr_pd *pd,
1194 struct ib_qp_init_attr *attrs)
1195{
1196 qp->pd = pd;
1197
1198 spin_lock_init(&qp->q_lock);
1199
1200 qp->qp_type = attrs->qp_type;
1201 qp->max_inline_data = attrs->cap.max_inline_data;
1202 qp->sq.max_sges = attrs->cap.max_send_sge;
1203 qp->state = QED_ROCE_QP_STATE_RESET;
1204 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1205 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1206 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1207 qp->dev = dev;
1208
1209 DP_DEBUG(dev, QEDR_MSG_QP,
1210 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1211 pd->pd_id, qp->qp_type, qp->max_inline_data,
1212 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1213 DP_DEBUG(dev, QEDR_MSG_QP,
1214 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1215 qp->sq.max_sges, qp->sq_cq->icid);
1216 qp->rq.max_sges = attrs->cap.max_recv_sge;
1217 DP_DEBUG(dev, QEDR_MSG_QP,
1218 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1219 qp->rq.max_sges, qp->rq_cq->icid);
1220}
1221
1222static inline void
1223qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1224 struct qedr_create_qp_ureq *ureq)
1225{
1226 /* QP handle to be written in CQE */
1227 params->qp_handle_lo = ureq->qp_handle_lo;
1228 params->qp_handle_hi = ureq->qp_handle_hi;
1229}
1230
1231static inline void
1232qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1233{
1234 qp->sq.db = dev->db_addr +
1235 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1236 qp->sq.db_data.data.icid = qp->icid + 1;
1237}
1238
1239static inline void
1240qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1241{
1242 qp->rq.db = dev->db_addr +
1243 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1244 qp->rq.db_data.data.icid = qp->icid;
1245}
1246
1247static inline int
1248qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1249 struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1250{
1251 /* Allocate driver internal RQ array */
1252 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1253 GFP_KERNEL);
1254 if (!qp->rqe_wr_id)
1255 return -ENOMEM;
1256
1257 DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1258
1259 return 0;
1260}
1261
1262static inline int
1263qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1264 struct qedr_qp *qp,
1265 struct ib_qp_init_attr *attrs,
1266 struct qed_rdma_create_qp_in_params *params)
1267{
1268 u32 temp_max_wr;
1269
1270 /* Allocate driver internal SQ array */
1271 temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1272 temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1273
1274 /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
1275 qp->sq.max_wr = (u16)temp_max_wr;
1276 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1277 GFP_KERNEL);
1278 if (!qp->wqe_wr_id)
1279 return -ENOMEM;
1280
1281 DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1282
1283 /* QP handle to be written in CQE */
1284 params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1285 params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1286
1287 return 0;
1288}
1289
1290static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1291 struct qedr_qp *qp,
1292 struct ib_qp_init_attr *attrs)
1293{
1294 u32 n_sq_elems, n_sq_entries;
1295 int rc;
1296
1297 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1298 * the ring. The ring should allow at least a single WR, even if the
1299 * user requested none, due to allocation issues.
1300 */
1301 n_sq_entries = attrs->cap.max_send_wr;
1302 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1303 n_sq_entries = max_t(u32, n_sq_entries, 1);
1304 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1305 rc = dev->ops->common->chain_alloc(dev->cdev,
1306 QED_CHAIN_USE_TO_PRODUCE,
1307 QED_CHAIN_MODE_PBL,
1308 QED_CHAIN_CNT_TYPE_U32,
1309 n_sq_elems,
1310 QEDR_SQE_ELEMENT_SIZE,
1311 &qp->sq.pbl);
1312 if (rc) {
1313 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1314 return rc;
1315 }
1316
1317 DP_DEBUG(dev, QEDR_MSG_SQ,
1318 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1319 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1320 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1321 return 0;
1322}
1323
1324static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1325 struct qedr_qp *qp,
1326 struct ib_qp_init_attr *attrs)
1327{
1328 u32 n_rq_elems, n_rq_entries;
1329 int rc;
1330
1331 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1332 * the ring. There ring should allow at least a single WR, even if the
1333 * user requested none, due to allocation issues.
1334 */
1335 n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1336 n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1337 rc = dev->ops->common->chain_alloc(dev->cdev,
1338 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1339 QED_CHAIN_MODE_PBL,
1340 QED_CHAIN_CNT_TYPE_U32,
1341 n_rq_elems,
1342 QEDR_RQE_ELEMENT_SIZE,
1343 &qp->rq.pbl);
1344
1345 if (rc) {
1346 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1347 return -ENOMEM;
1348 }
1349
1350 DP_DEBUG(dev, QEDR_MSG_RQ,
1351 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1352 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1353 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1354
1355 /* n_rq_entries < u16 so the casting is safe */
1356 qp->rq.max_wr = (u16)n_rq_entries;
1357
1358 return 0;
1359}
1360
1361static inline void
1362qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1363 struct qedr_pd *pd,
1364 struct qedr_qp *qp,
1365 struct ib_qp_init_attr *attrs,
1366 struct ib_udata *udata,
1367 struct qed_rdma_create_qp_in_params *params)
1368{
1369 /* QP handle to be written in an async event */
1370 params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1371 params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1372
1373 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1374 params->fmr_and_reserved_lkey = !udata;
1375 params->pd = pd->pd_id;
1376 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1377 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1378 params->max_sq_sges = 0;
1379 params->stats_queue = 0;
1380
1381 if (udata) {
1382 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1383 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1384 } else {
1385 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1386 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1387 }
1388}
1389
1390static inline void
1391qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1392 struct ib_qp_init_attr *attrs,
1393 struct ib_udata *udata,
1394 struct qed_rdma_create_qp_in_params *params)
1395{
1396 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1397 params->srq_id = 0;
1398 params->use_srq = false;
1399
1400 if (udata) {
1401 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1402 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1403 } else {
1404 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1405 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1406 }
1407}
1408
1409static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1410{
1411 DP_DEBUG(dev, QEDR_MSG_QP,
1412 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1413 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1414 qp->urq.buf_len);
1415}
1416
1417static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1418 struct qedr_dev *dev,
1419 struct qedr_qp *qp,
1420 struct qedr_create_qp_ureq *ureq)
1421{
1422 int rc;
1423
1424 /* SQ - read access only (0), dma sync not required (0) */
1425 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1426 ureq->sq_len, 0, 0);
1427 if (rc)
1428 return rc;
1429
1430 /* RQ - read access only (0), dma sync not required (0) */
1431 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1432 ureq->rq_len, 0, 0);
1433
1434 if (rc)
1435 qedr_cleanup_user_sq(dev, qp);
1436 return rc;
1437}
1438
1439static inline int
1440qedr_init_kernel_qp(struct qedr_dev *dev,
1441 struct qedr_qp *qp,
1442 struct ib_qp_init_attr *attrs,
1443 struct qed_rdma_create_qp_in_params *params)
1444{
1445 int rc;
1446
1447 rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1448 if (rc) {
1449 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1450 return rc;
1451 }
1452
1453 rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1454 if (rc) {
1455 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1456 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1457 return rc;
1458 }
1459
1460 rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1461 if (rc) {
1462 qedr_cleanup_kernel_sq(dev, qp);
1463 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1464 return rc;
1465 }
1466
1467 rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1468 if (rc) {
1469 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1470 qedr_cleanup_kernel_sq(dev, qp);
1471 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1472 return rc;
1473 }
1474
1475 return rc;
1476}
1477
1478struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1479 struct ib_qp_init_attr *attrs,
1480 struct ib_udata *udata)
1481{
1482 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1483 struct qed_rdma_create_qp_out_params out_params;
1484 struct qed_rdma_create_qp_in_params in_params;
1485 struct qedr_pd *pd = get_qedr_pd(ibpd);
1486 struct ib_ucontext *ib_ctx = NULL;
1487 struct qedr_ucontext *ctx = NULL;
1488 struct qedr_create_qp_ureq ureq;
1489 struct qedr_qp *qp;
1490 int rc = 0;
1491
1492 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1493 udata ? "user library" : "kernel", pd);
1494
1495 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1496 if (rc)
1497 return ERR_PTR(rc);
1498
1499 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1500 if (!qp)
1501 return ERR_PTR(-ENOMEM);
1502
1503 if (attrs->srq)
1504 return ERR_PTR(-EINVAL);
1505
1506 DP_DEBUG(dev, QEDR_MSG_QP,
1507 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1508 get_qedr_cq(attrs->send_cq),
1509 get_qedr_cq(attrs->send_cq)->icid,
1510 get_qedr_cq(attrs->recv_cq),
1511 get_qedr_cq(attrs->recv_cq)->icid);
1512
1513 qedr_set_qp_init_params(dev, qp, pd, attrs);
1514
 1515 if (attrs->qp_type == IB_QPT_GSI) {
1516 if (udata) {
1517 DP_ERR(dev,
1518 "create qp: unexpected udata when creating GSI QP\n");
1519 goto err0;
1520 }
1521 return qedr_create_gsi_qp(dev, attrs, qp);
1522 }
1523
 1524 memset(&in_params, 0, sizeof(in_params));
1525
1526 if (udata) {
1527 if (!(udata && ibpd->uobject && ibpd->uobject->context))
1528 goto err0;
1529
1530 ib_ctx = ibpd->uobject->context;
1531 ctx = get_qedr_ucontext(ib_ctx);
1532
1533 memset(&ureq, 0, sizeof(ureq));
1534 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1535 DP_ERR(dev,
1536 "create qp: problem copying data from user space\n");
1537 goto err0;
1538 }
1539
1540 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1541 if (rc)
1542 goto err0;
1543
1544 qedr_init_qp_user_params(&in_params, &ureq);
1545 } else {
1546 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1547 if (rc)
1548 goto err0;
1549 }
1550
1551 qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1552 qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1553
1554 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1555 &in_params, &out_params);
1556
1557 if (!qp->qed_qp)
1558 goto err1;
1559
1560 qp->qp_id = out_params.qp_id;
1561 qp->icid = out_params.icid;
1562 qp->ibqp.qp_num = qp->qp_id;
1563
1564 if (udata) {
1565 rc = qedr_copy_qp_uresp(dev, qp, udata);
1566 if (rc)
1567 goto err2;
1568
1569 qedr_qp_user_print(dev, qp);
1570 } else {
1571 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1572 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1573 }
1574
1575 DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1576 udata ? "user" : "kernel", qp);
1577
1578 return &qp->ibqp;
1579
1580err2:
1581 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1582 if (rc)
1583 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1584err1:
1585 if (udata) {
1586 qedr_cleanup_user_sq(dev, qp);
1587 qedr_cleanup_user_rq(dev, qp);
1588 } else {
1589 qedr_cleanup_kernel_sq(dev, qp);
1590 qedr_cleanup_kernel_rq(dev, qp);
1591 }
1592
1593err0:
1594 kfree(qp);
1595
1596 return ERR_PTR(-EFAULT);
1597}
1598
1599enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1600{
1601 switch (qp_state) {
1602 case QED_ROCE_QP_STATE_RESET:
1603 return IB_QPS_RESET;
1604 case QED_ROCE_QP_STATE_INIT:
1605 return IB_QPS_INIT;
1606 case QED_ROCE_QP_STATE_RTR:
1607 return IB_QPS_RTR;
1608 case QED_ROCE_QP_STATE_RTS:
1609 return IB_QPS_RTS;
1610 case QED_ROCE_QP_STATE_SQD:
1611 return IB_QPS_SQD;
1612 case QED_ROCE_QP_STATE_ERR:
1613 return IB_QPS_ERR;
1614 case QED_ROCE_QP_STATE_SQE:
1615 return IB_QPS_SQE;
1616 }
1617 return IB_QPS_ERR;
1618}
1619
1620enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
1621{
1622 switch (qp_state) {
1623 case IB_QPS_RESET:
1624 return QED_ROCE_QP_STATE_RESET;
1625 case IB_QPS_INIT:
1626 return QED_ROCE_QP_STATE_INIT;
1627 case IB_QPS_RTR:
1628 return QED_ROCE_QP_STATE_RTR;
1629 case IB_QPS_RTS:
1630 return QED_ROCE_QP_STATE_RTS;
1631 case IB_QPS_SQD:
1632 return QED_ROCE_QP_STATE_SQD;
1633 case IB_QPS_ERR:
1634 return QED_ROCE_QP_STATE_ERR;
1635 default:
1636 return QED_ROCE_QP_STATE_ERR;
1637 }
1638}
1639
1640static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1641{
1642 qed_chain_reset(&qph->pbl);
1643 qph->prod = 0;
1644 qph->cons = 0;
1645 qph->wqe_cons = 0;
1646 qph->db_data.data.value = cpu_to_le16(0);
1647}
1648
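/* Validate a RoCE QP state transition and perform its side effects, e.g.
 * resetting the SW queues on RESET->INIT and ringing the RQ doorbell on
 * INIT->RTR.
 */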
1649static int qedr_update_qp_state(struct qedr_dev *dev,
1650 struct qedr_qp *qp,
1651 enum qed_roce_qp_state new_state)
1652{
1653 int status = 0;
1654
1655 if (new_state == qp->state)
 1656 return 0;
 1657
1658 switch (qp->state) {
1659 case QED_ROCE_QP_STATE_RESET:
1660 switch (new_state) {
1661 case QED_ROCE_QP_STATE_INIT:
1662 qp->prev_wqe_size = 0;
1663 qedr_reset_qp_hwq_info(&qp->sq);
1664 qedr_reset_qp_hwq_info(&qp->rq);
1665 break;
1666 default:
1667 status = -EINVAL;
1668 break;
1669 };
1670 break;
1671 case QED_ROCE_QP_STATE_INIT:
1672 switch (new_state) {
1673 case QED_ROCE_QP_STATE_RTR:
1674 /* Update doorbell (in case post_recv was
1675 * done before move to RTR)
1676 */
1677 wmb();
1678 writel(qp->rq.db_data.raw, qp->rq.db);
1679 /* Make sure write takes effect */
1680 mmiowb();
1681 break;
1682 case QED_ROCE_QP_STATE_ERR:
1683 break;
1684 default:
1685 /* Invalid state change. */
1686 status = -EINVAL;
1687 break;
1688 };
1689 break;
1690 case QED_ROCE_QP_STATE_RTR:
1691 /* RTR->XXX */
1692 switch (new_state) {
1693 case QED_ROCE_QP_STATE_RTS:
1694 break;
1695 case QED_ROCE_QP_STATE_ERR:
1696 break;
1697 default:
1698 /* Invalid state change. */
1699 status = -EINVAL;
1700 break;
1701 };
1702 break;
1703 case QED_ROCE_QP_STATE_RTS:
1704 /* RTS->XXX */
1705 switch (new_state) {
1706 case QED_ROCE_QP_STATE_SQD:
1707 break;
1708 case QED_ROCE_QP_STATE_ERR:
1709 break;
1710 default:
1711 /* Invalid state change. */
1712 status = -EINVAL;
1713 break;
1714 };
1715 break;
1716 case QED_ROCE_QP_STATE_SQD:
1717 /* SQD->XXX */
1718 switch (new_state) {
1719 case QED_ROCE_QP_STATE_RTS:
1720 case QED_ROCE_QP_STATE_ERR:
1721 break;
1722 default:
1723 /* Invalid state change. */
1724 status = -EINVAL;
1725 break;
1726 };
1727 break;
1728 case QED_ROCE_QP_STATE_ERR:
1729 /* ERR->XXX */
1730 switch (new_state) {
1731 case QED_ROCE_QP_STATE_RESET:
 1732 if ((qp->rq.prod != qp->rq.cons) ||
1733 (qp->sq.prod != qp->sq.cons)) {
1734 DP_NOTICE(dev,
1735 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1736 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1737 qp->sq.cons);
1738 status = -EINVAL;
1739 }
 1740 break;
1741 default:
1742 status = -EINVAL;
1743 break;
1744 };
1745 break;
1746 default:
1747 status = -EINVAL;
1748 break;
1749 };
1750
1751 return status;
1752}
1753
1754int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1755 int attr_mask, struct ib_udata *udata)
1756{
1757 struct qedr_qp *qp = get_qedr_qp(ibqp);
1758 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1759 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1760 enum ib_qp_state old_qp_state, new_qp_state;
1761 int rc = 0;
1762
1763 DP_DEBUG(dev, QEDR_MSG_QP,
1764 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1765 attr->qp_state);
1766
1767 old_qp_state = qedr_get_ibqp_state(qp->state);
1768 if (attr_mask & IB_QP_STATE)
1769 new_qp_state = attr->qp_state;
1770 else
1771 new_qp_state = old_qp_state;
1772
1773 if (!ib_modify_qp_is_ok
1774 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1775 IB_LINK_LAYER_ETHERNET)) {
1776 DP_ERR(dev,
1777 "modify qp: invalid attribute mask=0x%x specified for\n"
1778 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1779 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1780 new_qp_state);
1781 rc = -EINVAL;
1782 goto err;
1783 }
1784
1785 /* Translate the masks... */
1786 if (attr_mask & IB_QP_STATE) {
1787 SET_FIELD(qp_params.modify_flags,
1788 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1789 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1790 }
1791
1792 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1793 qp_params.sqd_async = true;
1794
1795 if (attr_mask & IB_QP_PKEY_INDEX) {
1796 SET_FIELD(qp_params.modify_flags,
1797 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1798 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1799 rc = -EINVAL;
1800 goto err;
1801 }
1802
1803 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1804 }
1805
1806 if (attr_mask & IB_QP_QKEY)
1807 qp->qkey = attr->qkey;
1808
1809 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1810 SET_FIELD(qp_params.modify_flags,
1811 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1812 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1813 IB_ACCESS_REMOTE_READ;
1814 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1815 IB_ACCESS_REMOTE_WRITE;
1816 qp_params.incoming_atomic_en = attr->qp_access_flags &
1817 IB_ACCESS_REMOTE_ATOMIC;
1818 }
1819
1820 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1821 if (attr_mask & IB_QP_PATH_MTU) {
1822 if (attr->path_mtu < IB_MTU_256 ||
1823 attr->path_mtu > IB_MTU_4096) {
1824 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1825 rc = -EINVAL;
1826 goto err;
1827 }
1828 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1829 ib_mtu_enum_to_int(iboe_get_mtu
1830 (dev->ndev->mtu)));
1831 }
1832
1833 if (!qp->mtu) {
1834 qp->mtu =
1835 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1836 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1837 }
1838
1839 SET_FIELD(qp_params.modify_flags,
1840 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1841
1842 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1843 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1844 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1845
1846 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1847
1848 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1849 if (rc) {
1850 DP_ERR(dev,
1851 "modify qp: problems with GID index %d (rc=%d)\n",
1852 attr->ah_attr.grh.sgid_index, rc);
1853 return rc;
1854 }
1855
1856 rc = qedr_get_dmac(dev, &attr->ah_attr,
1857 qp_params.remote_mac_addr);
1858 if (rc)
1859 return rc;
1860
1861 qp_params.use_local_mac = true;
1862 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1863
1864 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1865 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1866 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1867 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1868 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1869 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1870 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1871 qp_params.remote_mac_addr);
 1872
1873
1874 qp_params.mtu = qp->mtu;
1875 qp_params.lb_indication = false;
1876 }
1877
1878 if (!qp_params.mtu) {
1879 /* Stay with current MTU */
1880 if (qp->mtu)
1881 qp_params.mtu = qp->mtu;
1882 else
1883 qp_params.mtu =
1884 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1885 }
1886
1887 if (attr_mask & IB_QP_TIMEOUT) {
1888 SET_FIELD(qp_params.modify_flags,
1889 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1890
 1891 /* The received timeout value is an exponent used like this:
1892 * "12.7.34 LOCAL ACK TIMEOUT
1893 * Value representing the transport (ACK) timeout for use by
1894 * the remote, expressed as: 4.096 * 2^timeout [usec]"
1895 * The FW expects timeout in msec so we need to divide the usec
1896 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
1897 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
1898 * The value of zero means infinite so we use a 'max_t' to make
1899 * sure that sub 1 msec values will be configured as 1 msec.
1900 */
1901 if (attr->timeout)
1902 qp_params.ack_timeout =
1903 1 << max_t(int, attr->timeout - 8, 0);
1904 else
 1905 qp_params.ack_timeout = 0;
 1906 }
 1907
 1908 if (attr_mask & IB_QP_RETRY_CNT) {
1909 SET_FIELD(qp_params.modify_flags,
1910 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1911 qp_params.retry_cnt = attr->retry_cnt;
1912 }
1913
1914 if (attr_mask & IB_QP_RNR_RETRY) {
1915 SET_FIELD(qp_params.modify_flags,
1916 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1917 qp_params.rnr_retry_cnt = attr->rnr_retry;
1918 }
1919
1920 if (attr_mask & IB_QP_RQ_PSN) {
1921 SET_FIELD(qp_params.modify_flags,
1922 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1923 qp_params.rq_psn = attr->rq_psn;
1924 qp->rq_psn = attr->rq_psn;
1925 }
1926
1927 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1928 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1929 rc = -EINVAL;
1930 DP_ERR(dev,
1931 "unsupported max_rd_atomic=%d, supported=%d\n",
1932 attr->max_rd_atomic,
1933 dev->attr.max_qp_req_rd_atomic_resc);
1934 goto err;
1935 }
1936
1937 SET_FIELD(qp_params.modify_flags,
1938 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1939 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1940 }
1941
1942 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1943 SET_FIELD(qp_params.modify_flags,
1944 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1945 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1946 }
1947
1948 if (attr_mask & IB_QP_SQ_PSN) {
1949 SET_FIELD(qp_params.modify_flags,
1950 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1951 qp_params.sq_psn = attr->sq_psn;
1952 qp->sq_psn = attr->sq_psn;
1953 }
1954
1955 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1956 if (attr->max_dest_rd_atomic >
1957 dev->attr.max_qp_resp_rd_atomic_resc) {
1958 DP_ERR(dev,
1959 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1960 attr->max_dest_rd_atomic,
1961 dev->attr.max_qp_resp_rd_atomic_resc);
1962
1963 rc = -EINVAL;
1964 goto err;
1965 }
1966
1967 SET_FIELD(qp_params.modify_flags,
1968 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1969 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1970 }
1971
1972 if (attr_mask & IB_QP_DEST_QPN) {
1973 SET_FIELD(qp_params.modify_flags,
1974 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1975
1976 qp_params.dest_qp = attr->dest_qp_num;
1977 qp->dest_qp_num = attr->dest_qp_num;
1978 }
1979
1980 if (qp->qp_type != IB_QPT_GSI)
1981 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1982 qp->qed_qp, &qp_params);
1983
1984 if (attr_mask & IB_QP_STATE) {
1985 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1986 qedr_update_qp_state(dev, qp, qp_params.new_state);
1987 qp->state = qp_params.new_state;
1988 }
1989
1990err:
1991 return rc;
1992}
1993
1994static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1995{
1996 int ib_qp_acc_flags = 0;
1997
1998 if (params->incoming_rdma_write_en)
1999 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2000 if (params->incoming_rdma_read_en)
2001 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2002 if (params->incoming_atomic_en)
2003 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2004 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2005 return ib_qp_acc_flags;
2006}
2007
2008int qedr_query_qp(struct ib_qp *ibqp,
2009 struct ib_qp_attr *qp_attr,
2010 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2011{
2012 struct qed_rdma_query_qp_out_params params;
2013 struct qedr_qp *qp = get_qedr_qp(ibqp);
2014 struct qedr_dev *dev = qp->dev;
2015 int rc = 0;
2016
2017 memset(&params, 0, sizeof(params));
2018
2019 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2020 if (rc)
2021 goto err;
2022
2023 memset(qp_attr, 0, sizeof(*qp_attr));
2024 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2025
2026 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2027 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2028 qp_attr->path_mtu = iboe_get_mtu(params.mtu);
2029 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2030 qp_attr->rq_psn = params.rq_psn;
2031 qp_attr->sq_psn = params.sq_psn;
2032 qp_attr->dest_qp_num = params.dest_qp;
2033
2034 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2035
2036 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2037 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2038 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2039 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani57211e82017-01-24 13:50:35 +02002040 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002041 qp_init_attr->cap = qp_attr->cap;
2042
2043 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2044 sizeof(qp_attr->ah_attr.grh.dgid.raw));
2045
2046 qp_attr->ah_attr.grh.flow_label = params.flow_label;
2047 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2048 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2049 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2050
2051 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2052 qp_attr->ah_attr.port_num = 1;
2053 qp_attr->ah_attr.sl = 0;
2054 qp_attr->timeout = params.timeout;
2055 qp_attr->rnr_retry = params.rnr_retry;
2056 qp_attr->retry_cnt = params.retry_cnt;
2057 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2058 qp_attr->pkey_index = params.pkey_index;
2059 qp_attr->port_num = 1;
2060 qp_attr->ah_attr.src_path_bits = 0;
2061 qp_attr->ah_attr.static_rate = 0;
2062 qp_attr->alt_pkey_index = 0;
2063 qp_attr->alt_port_num = 0;
2064 qp_attr->alt_timeout = 0;
2065 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2066
2067 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2068 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2069 qp_attr->max_rd_atomic = params.max_rd_atomic;
2070 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2071
2072 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2073 qp_attr->cap.max_inline_data);
2074
2075err:
2076 return rc;
2077}
2078
2079int qedr_destroy_qp(struct ib_qp *ibqp)
2080{
2081 struct qedr_qp *qp = get_qedr_qp(ibqp);
2082 struct qedr_dev *dev = qp->dev;
2083 struct ib_qp_attr attr;
2084 int attr_mask = 0;
2085 int rc = 0;
2086
2087 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2088 qp, qp->qp_type);
2089
2090 if (qp->state != QED_ROCE_QP_STATE_RESET && qp->state != QED_ROCE_QP_STATE_ERR &&
2091 qp->state != QED_ROCE_QP_STATE_INIT) {
2092 attr.qp_state = IB_QPS_ERR;
2093 attr_mask |= IB_QP_STATE;
2094
2095 /* Change the QP state to ERROR */
2096 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2097 }
2098
2099 if (qp->qp_type != IB_QPT_GSI) {
2100 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2101 if (rc)
2102 return rc;
Ram Amrani04886772016-10-10 13:15:38 +03002103 } else {
2104 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002105 }
2106
2107 if (ibqp->uobject && ibqp->uobject->context) {
2108 qedr_cleanup_user_sq(dev, qp);
2109 qedr_cleanup_user_rq(dev, qp);
2110 } else {
2111 qedr_cleanup_kernel_sq(dev, qp);
2112 qedr_cleanup_kernel_rq(dev, qp);
2113 }
2114
2115 kfree(qp);
2116
2117 return rc;
2118}
Ram Amranie0290cc2016-10-10 13:15:35 +03002119
Ram Amrani04886772016-10-10 13:15:38 +03002120struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
2121{
2122 struct qedr_ah *ah;
2123
2124 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2125 if (!ah)
2126 return ERR_PTR(-ENOMEM);
2127
2128 ah->attr = *attr;
2129
2130 return &ah->ibah;
2131}
2132
2133int qedr_destroy_ah(struct ib_ah *ibah)
2134{
2135 struct qedr_ah *ah = get_qedr_ah(ibah);
2136
2137 kfree(ah);
2138 return 0;
2139}
2140
Ram Amranie0290cc2016-10-10 13:15:35 +03002141static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2142{
2143 struct qedr_pbl *pbl, *tmp;
2144
2145 if (info->pbl_table)
2146 list_add_tail(&info->pbl_table->list_entry,
2147 &info->free_pbl_list);
2148
2149 if (!list_empty(&info->inuse_pbl_list))
2150 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2151
2152 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2153 list_del(&pbl->list_entry);
2154 qedr_free_pbl(dev, &info->pbl_info, pbl);
2155 }
2156}
2157
2158static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2159 size_t page_list_len, bool two_layered)
2160{
2161 struct qedr_pbl *tmp;
2162 int rc;
2163
2164 INIT_LIST_HEAD(&info->free_pbl_list);
2165 INIT_LIST_HEAD(&info->inuse_pbl_list);
2166
2167 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2168 page_list_len, two_layered);
2169 if (rc)
2170 goto done;
2171
2172 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2173 if (!info->pbl_table) {
2174 rc = -ENOMEM;
2175 goto done;
2176 }
2177
2178 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2179 &info->pbl_table->pa);
2180
2181 /* In the usual case we use 2 PBLs, so we add one to the free
2182 * list and allocate another one
2183 */
2184 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2185 if (!tmp) {
2186 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2187 goto done;
2188 }
2189
2190 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2191
2192 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2193
2194done:
2195 if (rc)
2196 free_mr_info(dev, info);
2197
2198 return rc;
2199}
2200
2201struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2202 u64 usr_addr, int acc, struct ib_udata *udata)
2203{
2204 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2205 struct qedr_mr *mr;
2206 struct qedr_pd *pd;
2207 int rc = -ENOMEM;
2208
2209 pd = get_qedr_pd(ibpd);
2210 DP_DEBUG(dev, QEDR_MSG_MR,
2211 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2212 pd->pd_id, start, len, usr_addr, acc);
2213
2214 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2215 return ERR_PTR(-EINVAL);
2216
2217 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2218 if (!mr)
2219 return ERR_PTR(rc);
2220
2221 mr->type = QEDR_MR_USER;
2222
2223 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2224 if (IS_ERR(mr->umem)) {
2225 rc = -EFAULT;
2226 goto err0;
2227 }
2228
2229 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2230 if (rc)
2231 goto err1;
2232
2233 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2234 &mr->info.pbl_info);
2235
2236 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2237 if (rc) {
2238 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2239 goto err1;
2240 }
2241
2242 /* Index only, 18 bit long, lkey = itid << 8 | key */
2243 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2244 mr->hw_mr.key = 0;
2245 mr->hw_mr.pd = pd->pd_id;
2246 mr->hw_mr.local_read = 1;
2247 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2248 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2249 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2250 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2251 mr->hw_mr.mw_bind = false;
2252 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2253 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2254 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2255 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2256 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2257 mr->hw_mr.length = len;
2258 mr->hw_mr.vaddr = usr_addr;
2259 mr->hw_mr.zbva = false;
2260 mr->hw_mr.phy_mr = false;
2261 mr->hw_mr.dma_mr = false;
2262
2263 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2264 if (rc) {
2265 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2266 goto err2;
2267 }
2268
2269 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2270 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2271 mr->hw_mr.remote_atomic)
2272 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2273
2274 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2275 mr->ibmr.lkey);
2276 return &mr->ibmr;
2277
2278err2:
2279 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2280err1:
2281 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2282err0:
2283 kfree(mr);
2284 return ERR_PTR(rc);
2285}
2286
2287int qedr_dereg_mr(struct ib_mr *ib_mr)
2288{
2289 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2290 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2291 int rc = 0;
2292
2293 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2294 if (rc)
2295 return rc;
2296
2297 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2298
2299 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2300 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2301
2302 /* it could be user registered memory. */
2303 if (mr->umem)
2304 ib_umem_release(mr->umem);
2305
2306 kfree(mr);
2307
2308 return rc;
2309}
2310
2311struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2312{
2313 struct qedr_pd *pd = get_qedr_pd(ibpd);
2314 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2315 struct qedr_mr *mr;
2316 int rc = -ENOMEM;
2317
2318 DP_DEBUG(dev, QEDR_MSG_MR,
2319 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2320 max_page_list_len);
2321
2322 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2323 if (!mr)
2324 return ERR_PTR(rc);
2325
2326 mr->dev = dev;
2327 mr->type = QEDR_MR_FRMR;
2328
2329 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2330 if (rc)
2331 goto err0;
2332
2333 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2334 if (rc) {
2335 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2336 goto err0;
2337 }
2338
2339 /* Index only, 18 bit long, lkey = itid << 8 | key */
2340 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2341 mr->hw_mr.key = 0;
2342 mr->hw_mr.pd = pd->pd_id;
2343 mr->hw_mr.local_read = 1;
2344 mr->hw_mr.local_write = 0;
2345 mr->hw_mr.remote_read = 0;
2346 mr->hw_mr.remote_write = 0;
2347 mr->hw_mr.remote_atomic = 0;
2348 mr->hw_mr.mw_bind = false;
2349 mr->hw_mr.pbl_ptr = 0;
2350 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2351 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2352 mr->hw_mr.fbo = 0;
2353 mr->hw_mr.length = 0;
2354 mr->hw_mr.vaddr = 0;
2355 mr->hw_mr.zbva = false;
2356 mr->hw_mr.phy_mr = true;
2357 mr->hw_mr.dma_mr = false;
2358
2359 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2360 if (rc) {
2361 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2362 goto err1;
2363 }
2364
2365 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2366 mr->ibmr.rkey = mr->ibmr.lkey;
2367
2368 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2369 return mr;
2370
2371err1:
2372 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2373err0:
2374 kfree(mr);
2375 return ERR_PTR(rc);
2376}
2377
2378struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2379 enum ib_mr_type mr_type, u32 max_num_sg)
2380{
2381 struct qedr_dev *dev;
2382 struct qedr_mr *mr;
2383
2384 if (mr_type != IB_MR_TYPE_MEM_REG)
2385 return ERR_PTR(-EINVAL);
2386
2387 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2388
2389 if (IS_ERR(mr))
2390 return ERR_PTR(-EINVAL);
2391
2392 dev = mr->dev;
2393
2394 return &mr->ibmr;
2395}
2396
2397static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2398{
2399 struct qedr_mr *mr = get_qedr_mr(ibmr);
2400 struct qedr_pbl *pbl_table;
2401 struct regpair *pbe;
2402 u32 pbes_in_page;
2403
2404 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2405 DP_ERR(mr->dev, "qedr_set_page fails when npages=%d\n", mr->npages);
2406 return -ENOMEM;
2407 }
2408
2409 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2410 mr->npages, addr);
2411
2412 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2413 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2414 pbe = (struct regpair *)pbl_table->va;
2415 pbe += mr->npages % pbes_in_page;
2416 pbe->lo = cpu_to_le32((u32)addr);
2417 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2418
2419 mr->npages++;
2420
2421 return 0;
2422}
2423
2424static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2425{
2426 int work = info->completed - info->completed_handled - 1;
2427
2428 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2429 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2430 struct qedr_pbl *pbl;
2431
2432 /* Free all the page lists that can be freed (all the ones that
2433 * were invalidated), under the assumption that if an FMR completed
2434 * successfully, then any invalidate operation that preceded it has
2435 * completed as well.
2436 */
2437 pbl = list_first_entry(&info->inuse_pbl_list,
2438 struct qedr_pbl, list_entry);
2439 list_del(&pbl->list_entry);
2440 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
2441 info->completed_handled++;
2442 }
2443}
2444
2445int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2446 int sg_nents, unsigned int *sg_offset)
2447{
2448 struct qedr_mr *mr = get_qedr_mr(ibmr);
2449
2450 mr->npages = 0;
2451
2452 handle_completed_mrs(mr->dev, &mr->info);
2453 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2454}
2455
2456struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2457{
2458 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2459 struct qedr_pd *pd = get_qedr_pd(ibpd);
2460 struct qedr_mr *mr;
2461 int rc;
2462
2463 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2464 if (!mr)
2465 return ERR_PTR(-ENOMEM);
2466
2467 mr->type = QEDR_MR_DMA;
2468
2469 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2470 if (rc) {
2471 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2472 goto err1;
2473 }
2474
2475 /* index only, 18 bit long, lkey = itid << 8 | key */
2476 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2477 mr->hw_mr.pd = pd->pd_id;
2478 mr->hw_mr.local_read = 1;
2479 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2480 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2481 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2482 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2483 mr->hw_mr.dma_mr = true;
2484
2485 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2486 if (rc) {
2487 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2488 goto err2;
2489 }
2490
2491 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2492 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2493 mr->hw_mr.remote_atomic)
2494 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2495
2496 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2497 return &mr->ibmr;
2498
2499err2:
2500 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2501err1:
2502 kfree(mr);
2503 return ERR_PTR(rc);
2504}
Ram Amraniafa0e132016-10-10 13:15:36 +03002505
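/* The work queue keeps one slot unused, so it is considered full when
* advancing prod by one (modulo max_wr) would land on cons.
*/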
2506static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2507{
2508 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2509}
2510
2511static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2512{
2513 int i, len = 0;
2514
2515 for (i = 0; i < num_sge; i++)
2516 len += sg_list[i].length;
2517
2518 return len;
2519}
2520
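/* Byte-swap each 64-bit word of an inline WQE segment; on either host
* endianness, cpu_to_be64(cpu_to_le64(x)) amounts to an unconditional swab64.
*/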
2521static void swap_wqe_data64(u64 *p)
2522{
2523 int i;
2524
2525 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2526 *p = cpu_to_be64(cpu_to_le64(*p));
2527}
2528
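/* Copy the WR payload straight into consecutive SQ chain elements instead of
* posting SGEs, byte-swapping every completed segment (see swap_wqe_data64)
* and bumping *wqe_size per chain element consumed. Returns the number of
* bytes copied, or 0 with *bad_wr set when the data exceeds the inline limit.
*/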
2529static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2530 struct qedr_qp *qp, u8 *wqe_size,
2531 struct ib_send_wr *wr,
2532 struct ib_send_wr **bad_wr, u8 *bits,
2533 u8 bit)
2534{
2535 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2536 char *seg_prt, *wqe;
2537 int i, seg_siz;
2538
2539 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2540 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2541 *bad_wr = wr;
2542 return 0;
2543 }
2544
2545 if (!data_size)
2546 return data_size;
2547
2548 *bits |= bit;
2549
2550 seg_prt = NULL;
2551 wqe = NULL;
2552 seg_siz = 0;
2553
2554 /* Copy data inline */
2555 for (i = 0; i < wr->num_sge; i++) {
2556 u32 len = wr->sg_list[i].length;
2557 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2558
2559 while (len > 0) {
2560 u32 cur;
2561
2562 /* New segment required */
2563 if (!seg_siz) {
2564 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2565 seg_prt = wqe;
2566 seg_siz = sizeof(struct rdma_sq_common_wqe);
2567 (*wqe_size)++;
2568 }
2569
2570 /* Calculate currently allowed length */
2571 cur = min_t(u32, len, seg_siz);
2572 memcpy(seg_prt, src, cur);
2573
2574 /* Update segment variables */
2575 seg_prt += cur;
2576 seg_siz -= cur;
2577
2578 /* Update sge variables */
2579 src += cur;
2580 len -= cur;
2581
2582 /* Swap fully-completed segments */
2583 if (!seg_siz)
2584 swap_wqe_data64((u64 *)wqe);
2585 }
2586 }
2587
2588 /* swap the last, partially-filled segment */
2589 if (seg_siz)
2590 swap_wqe_data64((u64 *)wqe);
2591
2592 return data_size;
2593}
2594
2595#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2596 do { \
2597 DMA_REGPAIR_LE(sge->addr, vaddr); \
2598 (sge)->length = cpu_to_le32(vlength); \
2599 (sge)->flags = cpu_to_le32(vflags); \
2600 } while (0)
2601
2602#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2603 do { \
2604 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2605 (hdr)->num_sges = num_sge; \
2606 } while (0)
2607
2608#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2609 do { \
2610 DMA_REGPAIR_LE(sge->addr, vaddr); \
2611 (sge)->length = cpu_to_le32(vlength); \
2612 (sge)->l_key = cpu_to_le32(vlkey); \
2613 } while (0)
2614
2615static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2616 struct ib_send_wr *wr)
2617{
2618 u32 data_size = 0;
2619 int i;
2620
2621 for (i = 0; i < wr->num_sge; i++) {
2622 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2623
2624 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2625 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2626 sge->length = cpu_to_le32(wr->sg_list[i].length);
2627 data_size += wr->sg_list[i].length;
2628 }
2629
2630 if (wqe_size)
2631 *wqe_size += wr->num_sge;
2632
2633 return data_size;
2634}
2635
2636static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2637 struct qedr_qp *qp,
2638 struct rdma_sq_rdma_wqe_1st *rwqe,
2639 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2640 struct ib_send_wr *wr,
2641 struct ib_send_wr **bad_wr)
2642{
2643 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2644 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2645
2646 if (wr->send_flags & IB_SEND_INLINE) {
2647 u8 flags = 0;
2648
2649 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2650 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2651 bad_wr, &rwqe->flags, flags);
2652 }
2653
2654 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2655}
2656
2657static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2658 struct qedr_qp *qp,
2659 struct rdma_sq_send_wqe_1st *swqe,
2660 struct rdma_sq_send_wqe_2st *swqe2,
2661 struct ib_send_wr *wr,
2662 struct ib_send_wr **bad_wr)
2663{
2664 memset(swqe2, 0, sizeof(*swqe2));
2665 if (wr->send_flags & IB_SEND_INLINE) {
2666 u8 flags = 0;
2667
2668 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2669 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2670 bad_wr, &swqe->flags, flags);
2671 }
2672
2673 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2674}
2675
2676static int qedr_prepare_reg(struct qedr_qp *qp,
2677 struct rdma_sq_fmr_wqe_1st *fwqe1,
2678 struct ib_reg_wr *wr)
2679{
2680 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2681 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2682
2683 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2684 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2685 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2686 fwqe1->l_key = wr->key;
2687
2688 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2689 !!(wr->access & IB_ACCESS_REMOTE_READ));
2690 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2691 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2692 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2693 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2694 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2695 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2696 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2697 fwqe2->fmr_ctrl = 0;
2698
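/* The page size is encoded as log2 of the size in 4K (2^12) units */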
2699 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2700 ilog2(mr->ibmr.page_size) - 12);
2701
2702 fwqe2->length_hi = 0;
2703 fwqe2->length_lo = mr->ibmr.length;
2704 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2705 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2706
2707 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2708
2709 return 0;
2710}
2711
2712enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2713{
2714 switch (opcode) {
2715 case IB_WR_RDMA_WRITE:
2716 case IB_WR_RDMA_WRITE_WITH_IMM:
2717 return IB_WC_RDMA_WRITE;
2718 case IB_WR_SEND_WITH_IMM:
2719 case IB_WR_SEND:
2720 case IB_WR_SEND_WITH_INV:
2721 return IB_WC_SEND;
2722 case IB_WR_RDMA_READ:
2723 return IB_WC_RDMA_READ;
2724 case IB_WR_ATOMIC_CMP_AND_SWP:
2725 return IB_WC_COMP_SWAP;
2726 case IB_WR_ATOMIC_FETCH_AND_ADD:
2727 return IB_WC_FETCH_ADD;
2728 case IB_WR_REG_MR:
2729 return IB_WC_REG_MR;
2730 case IB_WR_LOCAL_INV:
2731 return IB_WC_LOCAL_INV;
2732 default:
2733 return IB_WC_SEND;
2734 }
2735}
2736
2737inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2738{
2739 int wq_is_full, err_wr, pbl_is_full;
2740 struct qedr_dev *dev = qp->dev;
2741
2742 /* prevent SQ overflow and/or processing of a bad WR */
2743 err_wr = wr->num_sge > qp->sq.max_sges;
2744 wq_is_full = qedr_wq_is_full(&qp->sq);
2745 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2746 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2747 if (wq_is_full || err_wr || pbl_is_full) {
2748 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2749 DP_ERR(dev,
2750 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2751 qp);
2752 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2753 }
2754
2755 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2756 DP_ERR(dev,
2757 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2758 qp);
2759 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2760 }
2761
2762 if (pbl_is_full &&
2763 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2764 DP_ERR(dev,
2765 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2766 qp);
2767 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2768 }
2769 return false;
2770 }
2771 return true;
2772}
2773
2774int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2775 struct ib_send_wr **bad_wr)
2776{
2777 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2778 struct qedr_qp *qp = get_qedr_qp(ibqp);
2779 struct rdma_sq_atomic_wqe_1st *awqe1;
2780 struct rdma_sq_atomic_wqe_2nd *awqe2;
2781 struct rdma_sq_atomic_wqe_3rd *awqe3;
2782 struct rdma_sq_send_wqe_2st *swqe2;
2783 struct rdma_sq_local_inv_wqe *iwqe;
2784 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2785 struct rdma_sq_send_wqe_1st *swqe;
2786 struct rdma_sq_rdma_wqe_1st *rwqe;
2787 struct rdma_sq_fmr_wqe_1st *fwqe1;
2788 struct rdma_sq_common_wqe *wqe;
2789 u32 length;
2790 int rc = 0;
2791 bool comp;
2792
2793 if (!qedr_can_post_send(qp, wr)) {
2794 *bad_wr = wr;
2795 return -ENOMEM;
2796 }
2797
2798 wqe = qed_chain_produce(&qp->sq.pbl);
2799 qp->wqe_wr_id[qp->sq.prod].signaled =
2800 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2801
2802 wqe->flags = 0;
2803 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2804 !!(wr->send_flags & IB_SEND_SOLICITED));
2805 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2806 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2807 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2808 !!(wr->send_flags & IB_SEND_FENCE));
2809 wqe->prev_wqe_size = qp->prev_wqe_size;
2810
2811 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2812
2813 switch (wr->opcode) {
2814 case IB_WR_SEND_WITH_IMM:
Kalderon, Michalfccbe382018-03-05 10:50:11 +02002815 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2816 rc = -EINVAL;
2817 *bad_wr = wr;
2818 break;
2819 }
Ram Amraniafa0e132016-10-10 13:15:36 +03002820 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2821 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2822 swqe->wqe_size = 2;
2823 swqe2 = qed_chain_produce(&qp->sq.pbl);
2824
2825 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2826 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2827 wr, bad_wr);
2828 swqe->length = cpu_to_le32(length);
2829 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2830 qp->prev_wqe_size = swqe->wqe_size;
2831 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2832 break;
2833 case IB_WR_SEND:
2834 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2835 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2836
2837 swqe->wqe_size = 2;
2838 swqe2 = qed_chain_produce(&qp->sq.pbl);
2839 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2840 wr, bad_wr);
2841 swqe->length = cpu_to_le32(length);
2842 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2843 qp->prev_wqe_size = swqe->wqe_size;
2844 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2845 break;
2846 case IB_WR_SEND_WITH_INV:
2847 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2848 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2849 swqe2 = qed_chain_produce(&qp->sq.pbl);
2850 swqe->wqe_size = 2;
2851 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2852 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2853 wr, bad_wr);
2854 swqe->length = cpu_to_le32(length);
2855 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2856 qp->prev_wqe_size = swqe->wqe_size;
2857 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2858 break;
2859
2860 case IB_WR_RDMA_WRITE_WITH_IMM:
Kalderon, Michalfccbe382018-03-05 10:50:11 +02002861 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2862 rc = -EINVAL;
2863 *bad_wr = wr;
2864 break;
2865 }
Ram Amraniafa0e132016-10-10 13:15:36 +03002866 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2867 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2868
2869 rwqe->wqe_size = 2;
2870 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2871 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2872 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2873 wr, bad_wr);
2874 rwqe->length = cpu_to_le32(length);
2875 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2876 qp->prev_wqe_size = rwqe->wqe_size;
2877 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2878 break;
2879 case IB_WR_RDMA_WRITE:
2880 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2881 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2882
2883 rwqe->wqe_size = 2;
2884 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2885 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2886 wr, bad_wr);
2887 rwqe->length = cpu_to_le32(length);
2888 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2889 qp->prev_wqe_size = rwqe->wqe_size;
2890 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2891 break;
2892 case IB_WR_RDMA_READ_WITH_INV:
2893 DP_ERR(dev,
2894 "RDMA READ WITH INVALIDATE not supported\n");
2895 *bad_wr = wr;
2896 rc = -EINVAL;
2897 break;
2898
2899 case IB_WR_RDMA_READ:
2900 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2901 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2902
2903 rwqe->wqe_size = 2;
2904 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2905 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2906 wr, bad_wr);
2907 rwqe->length = cpu_to_le32(length);
2908 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2909 qp->prev_wqe_size = rwqe->wqe_size;
2910 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2911 break;
2912
2913 case IB_WR_ATOMIC_CMP_AND_SWP:
2914 case IB_WR_ATOMIC_FETCH_AND_ADD:
2915 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2916 awqe1->wqe_size = 4;
2917
2918 awqe2 = qed_chain_produce(&qp->sq.pbl);
2919 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2920 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2921
2922 awqe3 = qed_chain_produce(&qp->sq.pbl);
2923
2924 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2925 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2926 DMA_REGPAIR_LE(awqe3->swap_data,
2927 atomic_wr(wr)->compare_add);
2928 } else {
2929 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2930 DMA_REGPAIR_LE(awqe3->swap_data,
2931 atomic_wr(wr)->swap);
2932 DMA_REGPAIR_LE(awqe3->cmp_data,
2933 atomic_wr(wr)->compare_add);
2934 }
2935
2936 qedr_prepare_sq_sges(qp, NULL, wr);
2937
2938 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2939 qp->prev_wqe_size = awqe1->wqe_size;
2940 break;
2941
2942 case IB_WR_LOCAL_INV:
2943 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2944 iwqe->wqe_size = 1;
2945
2946 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2947 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2948 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2949 qp->prev_wqe_size = iwqe->wqe_size;
2950 break;
2951 case IB_WR_REG_MR:
2952 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2953 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2954 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2955 fwqe1->wqe_size = 2;
2956
2957 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2958 if (rc) {
2959 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2960 *bad_wr = wr;
2961 break;
2962 }
2963
2964 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2965 qp->prev_wqe_size = fwqe1->wqe_size;
2966 break;
2967 default:
2968 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2969 rc = -EINVAL;
2970 *bad_wr = wr;
2971 break;
2972 }
2973
2974 if (*bad_wr) {
2975 u16 value;
2976
2977 /* Restore prod to its position before
2978 * this WR was processed
2979 */
2980 value = le16_to_cpu(qp->sq.db_data.data.value);
2981 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2982
2983 /* Restore prev_wqe_size */
2984 qp->prev_wqe_size = wqe->prev_wqe_size;
2985 rc = -EINVAL;
2986 DP_ERR(dev, "POST SEND FAILED\n");
2987 }
2988
2989 return rc;
2990}
2991
2992int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2993 struct ib_send_wr **bad_wr)
2994{
2995 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2996 struct qedr_qp *qp = get_qedr_qp(ibqp);
2997 unsigned long flags;
2998 int rc = 0;
2999
3000 *bad_wr = NULL;
3001
Ram Amrani04886772016-10-10 13:15:38 +03003002 if (qp->qp_type == IB_QPT_GSI)
3003 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3004
Ram Amraniafa0e132016-10-10 13:15:36 +03003005 if (!wr) {
3006 DP_ERR(dev, "Got an empty post send.\n");
3007 return -EINVAL;
3008 }
3009
3010 spin_lock_irqsave(&qp->q_lock, flags);
3011
3012 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
3013 (qp->state == QED_ROCE_QP_STATE_ERR)) {
3014 spin_unlock_irqrestore(&qp->q_lock, flags);
3015 *bad_wr = wr;
3016 DP_DEBUG(dev, QEDR_MSG_CQ,
3017 "QP in wrong state! QP icid=0x%x state %d\n",
3018 qp->icid, qp->state);
3019 return -EINVAL;
3020 }
3021
3022 while (wr) {
3023 rc = __qedr_post_send(ibqp, wr, bad_wr);
3024 if (rc)
3025 break;
3026
3027 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3028
3029 qedr_inc_sw_prod(&qp->sq);
3030
3031 qp->sq.db_data.data.value++;
3032
3033 wr = wr->next;
3034 }
3035
3036 /* Trigger doorbell
3037 * If there was a failure in the first WR then it will be triggered in
3038 * vain. However, this is not harmful (as long as the producer value is
3039 * unchanged). For performance reasons we avoid checking for this
3040 * redundant doorbell.
3041 */
3042 wmb();
3043 writel(qp->sq.db_data.raw, qp->sq.db);
3044
3045 /* Make sure write sticks */
3046 mmiowb();
3047
3048 spin_unlock_irqrestore(&qp->q_lock, flags);
3049
3050 return rc;
3051}
3052
3053int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3054 struct ib_recv_wr **bad_wr)
3055{
3056 struct qedr_qp *qp = get_qedr_qp(ibqp);
3057 struct qedr_dev *dev = qp->dev;
3058 unsigned long flags;
3059 int status = 0;
3060
Ram Amrani04886772016-10-10 13:15:38 +03003061 if (qp->qp_type == IB_QPT_GSI)
3062 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3063
Ram Amraniafa0e132016-10-10 13:15:36 +03003064 spin_lock_irqsave(&qp->q_lock, flags);
3065
3066 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
3067 (qp->state == QED_ROCE_QP_STATE_ERR)) {
3068 spin_unlock_irqrestore(&qp->q_lock, flags);
3069 *bad_wr = wr;
3070 return -EINVAL;
3071 }
3072
3073 while (wr) {
3074 int i;
3075
3076 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3077 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3078 wr->num_sge > qp->rq.max_sges) {
3079 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3080 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3081 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3082 qp->rq.max_sges);
3083 status = -ENOMEM;
3084 *bad_wr = wr;
3085 break;
3086 }
3087 for (i = 0; i < wr->num_sge; i++) {
3088 u32 flags = 0;
3089 struct rdma_rq_sge *rqe =
3090 qed_chain_produce(&qp->rq.pbl);
3091
3092 /* First one must include the number
3093 * of SGE in the list
3094 */
3095 if (!i)
3096 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3097 wr->num_sge);
3098
3099 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3100 wr->sg_list[i].lkey);
3101
3102 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3103 wr->sg_list[i].length, flags);
3104 }
3105
3106 /* Special case of no SGEs: the FW requires between 1 and 4 SGEs,
3107 * so we post a single SGE with length zero. This is needed because
3108 * an RDMA write with immediate still consumes an RQ entry.
3109 */
3110 if (!wr->num_sge) {
3111 u32 flags = 0;
3112 struct rdma_rq_sge *rqe =
3113 qed_chain_produce(&qp->rq.pbl);
3114
3115 /* First one must include the number
3116 * of SGE in the list
3117 */
3118 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3119 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3120
3121 RQ_SGE_SET(rqe, 0, 0, flags);
3122 i = 1;
3123 }
3124
3125 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3126 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3127
3128 qedr_inc_sw_prod(&qp->rq);
3129
3130 /* Flush all the writes before signalling doorbell */
3131 wmb();
3132
3133 qp->rq.db_data.data.value++;
3134
3135 writel(qp->rq.db_data.raw, qp->rq.db);
3136
3137 /* Make sure write sticks */
3138 mmiowb();
3139
3140 wr = wr->next;
3141 }
3142
3143 spin_unlock_irqrestore(&qp->q_lock, flags);
3144
3145 return status;
3146}
3147
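/* A CQE is treated as new only while its toggle bit matches the CQ's current
* toggle value, letting the driver detect fresh entries without reading a
* producer index.
*/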
3148static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3149{
3150 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3151
3152 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3153 cq->pbl_toggle;
3154}
3155
3156static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3157{
3158 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3159 struct qedr_qp *qp;
3160
3161 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3162 resp_cqe->qp_handle.lo,
3163 u64);
3164 return qp;
3165}
3166
3167static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3168{
3169 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3170
3171 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3172}
3173
3174/* Return latest CQE (needs processing) */
3175static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3176{
3177 return cq->latest_cqe;
3178}
3179
3180 /* For FMRs we need to increase the completed counter that the FMR algorithm
3181 * uses to determine whether a PBL can be freed or not.
3182 * This must be done whether or not the work request was signaled. For that
3183 * purpose we call this function from the condition that checks if a WR
3184 * should be skipped, to make sure we don't miss it (possibly this FMR
3185 * operation was not signaled).
3186 */
3187static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3188{
3189 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3190 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3191}
3192
3193static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3194 struct qedr_cq *cq, int num_entries,
3195 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3196 int force)
3197{
3198 u16 cnt = 0;
3199
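/* Walk the SQ from the SW consumer up to the HW consumer reported in the
* CQE, generating a WC for each signaled WQE (or for every WQE when 'force'
* is set, e.g. while flushing).
*/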
3200 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3201 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3202 qedr_chk_if_fmr(qp);
3203 /* skip WC */
3204 goto next_cqe;
3205 }
3206
3207 /* fill WC */
3208 wc->status = status;
3209 wc->wc_flags = 0;
3210 wc->src_qp = qp->id;
3211 wc->qp = &qp->ibqp;
3212
3213 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3214 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3215
3216 switch (wc->opcode) {
3217 case IB_WC_RDMA_WRITE:
3218 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3219 break;
3220 case IB_WC_COMP_SWAP:
3221 case IB_WC_FETCH_ADD:
3222 wc->byte_len = 8;
3223 break;
3224 case IB_WC_REG_MR:
3225 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3226 break;
3227 default:
3228 break;
3229 }
3230
3231 num_entries--;
3232 wc++;
3233 cnt++;
3234next_cqe:
3235 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3236 qed_chain_consume(&qp->sq.pbl);
3237 qedr_inc_sw_cons(&qp->sq);
3238 }
3239
3240 return cnt;
3241}
3242
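/* Requester-side CQE handling: on success or flush, complete WQEs up to
* sq_cons; on a real error, complete the WQEs preceding the failed one as
* successful and then report the failure itself with a specific WC status.
*/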
3243static int qedr_poll_cq_req(struct qedr_dev *dev,
3244 struct qedr_qp *qp, struct qedr_cq *cq,
3245 int num_entries, struct ib_wc *wc,
3246 struct rdma_cqe_requester *req)
3247{
3248 int cnt = 0;
3249
3250 switch (req->status) {
3251 case RDMA_CQE_REQ_STS_OK:
3252 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3253 IB_WC_SUCCESS, 0);
3254 break;
3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic5ea7aa2017-01-24 13:50:37 +02003256 if (qp->state != QED_ROCE_QP_STATE_ERR)
3257 DP_ERR(dev,
3258 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003260 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3261 IB_WC_WR_FLUSH_ERR, 0);
3262 break;
3263 default:
3264 /* process all WQEs before the failed one */
3265 qp->state = QED_ROCE_QP_STATE_ERR;
3266 cnt = process_req(dev, qp, cq, num_entries, wc,
3267 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3268 wc += cnt;
3269 /* if we have extra WC fill it with actual error info */
3270 if (cnt < num_entries) {
3271 enum ib_wc_status wc_status;
3272
3273 switch (req->status) {
3274 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3275 DP_ERR(dev,
3276 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3277 cq->icid, qp->icid);
3278 wc_status = IB_WC_BAD_RESP_ERR;
3279 break;
3280 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3281 DP_ERR(dev,
3282 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3283 cq->icid, qp->icid);
3284 wc_status = IB_WC_LOC_LEN_ERR;
3285 break;
3286 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3287 DP_ERR(dev,
3288 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3289 cq->icid, qp->icid);
3290 wc_status = IB_WC_LOC_QP_OP_ERR;
3291 break;
3292 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3293 DP_ERR(dev,
3294 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3295 cq->icid, qp->icid);
3296 wc_status = IB_WC_LOC_PROT_ERR;
3297 break;
3298 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3299 DP_ERR(dev,
3300 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3301 cq->icid, qp->icid);
3302 wc_status = IB_WC_MW_BIND_ERR;
3303 break;
3304 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3305 DP_ERR(dev,
3306 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3307 cq->icid, qp->icid);
3308 wc_status = IB_WC_REM_INV_REQ_ERR;
3309 break;
3310 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3311 DP_ERR(dev,
3312 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3313 cq->icid, qp->icid);
3314 wc_status = IB_WC_REM_ACCESS_ERR;
3315 break;
3316 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3317 DP_ERR(dev,
3318 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3319 cq->icid, qp->icid);
3320 wc_status = IB_WC_REM_OP_ERR;
3321 break;
3322 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3323 DP_ERR(dev,
3324 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3325 cq->icid, qp->icid);
3326 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3327 break;
3328 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3329 DP_ERR(dev,
3330 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3331 cq->icid, qp->icid);
3332 wc_status = IB_WC_RETRY_EXC_ERR;
3333 break;
3334 default:
3335 DP_ERR(dev,
3336 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3337 cq->icid, qp->icid);
3338 wc_status = IB_WC_GENERAL_ERR;
3339 }
3340 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3341 wc_status, 1);
3342 }
3343 }
3344
3345 return cnt;
3346}
3347
3348static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3349 struct qedr_cq *cq, struct ib_wc *wc,
3350 struct rdma_cqe_responder *resp, u64 wr_id)
3351{
3352 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3353 u8 flags;
3354
3355 wc->opcode = IB_WC_RECV;
3356 wc->wc_flags = 0;
3357
3358 switch (resp->status) {
3359 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3360 wc_status = IB_WC_LOC_ACCESS_ERR;
3361 break;
3362 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3363 wc_status = IB_WC_LOC_LEN_ERR;
3364 break;
3365 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3366 wc_status = IB_WC_LOC_QP_OP_ERR;
3367 break;
3368 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3369 wc_status = IB_WC_LOC_PROT_ERR;
3370 break;
3371 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3372 wc_status = IB_WC_MW_BIND_ERR;
3373 break;
3374 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3375 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3376 break;
3377 case RDMA_CQE_RESP_STS_OK:
3378 wc_status = IB_WC_SUCCESS;
3379 wc->byte_len = le32_to_cpu(resp->length);
3380
3381 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3382
3383 if (flags == QEDR_RESP_RDMA_IMM)
3384 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3385
3386 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3387 wc->ex.imm_data =
3388 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3389 wc->wc_flags |= IB_WC_WITH_IMM;
3390 }
3391 break;
3392 default:
3393 wc->status = IB_WC_GENERAL_ERR;
3394 DP_ERR(dev, "Invalid CQE status detected\n");
3395 }
3396
3397 /* fill WC */
3398 wc->status = wc_status;
3399 wc->src_qp = qp->id;
3400 wc->qp = &qp->ibqp;
3401 wc->wr_id = wr_id;
3402}
3403
3404static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3405 struct qedr_cq *cq, struct ib_wc *wc,
3406 struct rdma_cqe_responder *resp)
3407{
3408 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3409
3410 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3411
3412 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3413 qed_chain_consume(&qp->rq.pbl);
3414 qedr_inc_sw_cons(&qp->rq);
3415
3416 return 1;
3417}
3418
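/* Flush path: complete every posted RQE up to the HW consumer with
* IB_WC_WR_FLUSH_ERR and consume the matching RQ chain elements.
*/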
3419static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3420 int num_entries, struct ib_wc *wc, u16 hw_cons)
3421{
3422 u16 cnt = 0;
3423
3424 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3425 /* fill WC */
3426 wc->status = IB_WC_WR_FLUSH_ERR;
3427 wc->wc_flags = 0;
3428 wc->src_qp = qp->id;
3429 wc->byte_len = 0;
3430 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3431 wc->qp = &qp->ibqp;
3432 num_entries--;
3433 wc++;
3434 cnt++;
3435 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3436 qed_chain_consume(&qp->rq.pbl);
3437 qedr_inc_sw_cons(&qp->rq);
3438 }
3439
3440 return cnt;
3441}
3442
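/* Consume the CQE only once the SW consumer has caught up with the consumer
* index it reports, since a single CQE may cover several posted WRs.
*/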
3443static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3444 struct rdma_cqe_responder *resp, int *update)
3445{
3446 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3447 consume_cqe(cq);
3448 *update |= 1;
3449 }
3450}
3451
3452static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3453 struct qedr_cq *cq, int num_entries,
3454 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3455 int *update)
3456{
3457 int cnt;
3458
3459 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3460 cnt = process_resp_flush(qp, cq, num_entries, wc,
3461 resp->rq_cons);
3462 try_consume_resp_cqe(cq, qp, resp, update);
3463 } else {
3464 cnt = process_resp_one(dev, qp, cq, wc, resp);
3465 consume_cqe(cq);
3466 *update |= 1;
3467 }
3468
3469 return cnt;
3470}
3471
3472static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3473 struct rdma_cqe_requester *req, int *update)
3474{
3475 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3476 consume_cqe(cq);
3477 *update |= 1;
3478 }
3479}
3480
3481int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3482{
3483 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3484 struct qedr_cq *cq = get_qedr_cq(ibcq);
Kalderon, Michal7c5f3d12018-03-05 10:50:10 +02003485 union rdma_cqe *cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003486 u32 old_cons, new_cons;
3487 unsigned long flags;
3488 int update = 0;
3489 int done = 0;
3490
Ram Amrani04886772016-10-10 13:15:38 +03003491 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3492 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3493
Ram Amraniafa0e132016-10-10 13:15:36 +03003494 spin_lock_irqsave(&cq->cq_lock, flags);
Kalderon, Michal7c5f3d12018-03-05 10:50:10 +02003495 cqe = cq->latest_cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003496 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3497 while (num_entries && is_valid_cqe(cq, cqe)) {
3498 struct qedr_qp *qp;
3499 int cnt = 0;
3500
3501 /* prevent speculative reads of any field of CQE */
3502 rmb();
3503
3504 qp = cqe_get_qp(cqe);
3505 if (!qp) {
3506 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3507 break;
3508 }
3509
3510 wc->qp = &qp->ibqp;
3511
3512 switch (cqe_get_type(cqe)) {
3513 case RDMA_CQE_TYPE_REQUESTER:
3514 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3515 &cqe->req);
3516 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3517 break;
3518 case RDMA_CQE_TYPE_RESPONDER_RQ:
3519 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3520 &cqe->resp, &update);
3521 break;
3522 case RDMA_CQE_TYPE_INVALID:
3523 default:
3524 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3525 cqe_get_type(cqe));
3526 }
3527 num_entries -= cnt;
3528 wc += cnt;
3529 done += cnt;
3530
3531 cqe = get_cqe(cq);
3532 }
3533 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3534
3535 cq->cq_cons += new_cons - old_cons;
3536
3537 if (update)
3538 /* doorbell notifies about the latest VALID entry,
3539 * but the chain already points to the next INVALID one
3540 */
3541 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3542
3543 spin_unlock_irqrestore(&cq->cq_lock, flags);
3544 return done;
3545}
Ram Amrani993d1b52016-10-10 13:15:39 +03003546
3547int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3548 u8 port_num,
3549 const struct ib_wc *in_wc,
3550 const struct ib_grh *in_grh,
3551 const struct ib_mad_hdr *mad_hdr,
3552 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3553 size_t *out_mad_size, u16 *out_mad_pkey_index)
3554{
3555 struct qedr_dev *dev = get_qedr_dev(ibdev);
3556
3557 DP_DEBUG(dev, QEDR_MSG_GSI,
3558 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3559 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3560 mad_hdr->class_specific, mad_hdr->class_version,
3561 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3562 return IB_MAD_RESULT_SUCCESS;
3563}
3564
3565int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3566 struct ib_port_immutable *immutable)
3567{
3568 struct ib_port_attr attr;
3569 int err;
3570
3571 err = qedr_query_port(ibdev, port_num, &attr);
3572 if (err)
3573 return err;
3574
3575 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3576 immutable->gid_tbl_len = attr.gid_tbl_len;
3577 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3578 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3579 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3580
3581 return 0;
3582}