Ram Amraniac1b36e2016-10-10 13:15:32 +03001/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020046#include <linux/qed/common_hsi.h>
47#include "qedr_hsi_rdma.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030048#include <linux/qed/qed_if.h>
49#include "qedr.h"
50#include "verbs.h"
51#include <rdma/qedr-abi.h>
Kalderon, Michal99d195c2017-07-26 14:41:51 +030052#include "qedr_roce_cm.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030053
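/* Convert a PWM doorbell offset into a byte offset within the doorbell BAR. */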
Ram Amrania7efd772016-10-10 13:15:33 +030054#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55
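/* Copy a response to user space, clamped to udata->outlen so a smaller
 * user-space response buffer is not overrun.
 */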
Amrani, Ramc75d3ec2017-06-26 19:05:04 +030056static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
Ram Amrania7efd772016-10-10 13:15:33 +030064int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65{
66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
67 return -EINVAL;
68
69 *pkey = QEDR_ROCE_PKEY_DEFAULT;
70 return 0;
71}
72
Kalderon, Michale6a38c52017-07-26 14:41:52 +030073int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
74 int index, union ib_gid *sgid)
75{
76 struct qedr_dev *dev = get_qedr_dev(ibdev);
77
78 memset(sgid->raw, 0, sizeof(sgid->raw));
79 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
80
81 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
82 sgid->global.interface_id, sgid->global.subnet_prefix);
83
84 return 0;
85}
86
Ram Amraniac1b36e2016-10-10 13:15:32 +030087int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
88 union ib_gid *sgid)
89{
90 struct qedr_dev *dev = get_qedr_dev(ibdev);
91 int rc = 0;
92
93 if (!rdma_cap_roce_gid_table(ibdev, port))
94 return -ENODEV;
95
96 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
97 if (rc == -EAGAIN) {
98 memcpy(sgid, &zgid, sizeof(*sgid));
99 return 0;
100 }
101
102 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
103 sgid->global.interface_id, sgid->global.subnet_prefix);
104
105 return rc;
106}
107
108int qedr_add_gid(struct ib_device *device, u8 port_num,
109 unsigned int index, const union ib_gid *gid,
110 const struct ib_gid_attr *attr, void **context)
111{
112 if (!rdma_cap_roce_gid_table(device, port_num))
113 return -EINVAL;
114
115 if (port_num > QEDR_MAX_PORT)
116 return -EINVAL;
117
118 if (!context)
119 return -EINVAL;
120
121 return 0;
122}
123
124int qedr_del_gid(struct ib_device *device, u8 port_num,
125 unsigned int index, void **context)
126{
127 if (!rdma_cap_roce_gid_table(device, port_num))
128 return -EINVAL;
129
130 if (port_num > QEDR_MAX_PORT)
131 return -EINVAL;
132
133 if (!context)
134 return -EINVAL;
135
136 return 0;
137}
138
139int qedr_query_device(struct ib_device *ibdev,
140 struct ib_device_attr *attr, struct ib_udata *udata)
141{
142 struct qedr_dev *dev = get_qedr_dev(ibdev);
143 struct qedr_device_attr *qattr = &dev->attr;
144
145 if (!dev->rdma_ctx) {
146 DP_ERR(dev,
147 "qedr_query_device called with invalid params rdma_ctx=%p\n",
148 dev->rdma_ctx);
149 return -EINVAL;
150 }
151
152 memset(attr, 0, sizeof(*attr));
153
154 attr->fw_ver = qattr->fw_ver;
155 attr->sys_image_guid = qattr->sys_image_guid;
156 attr->max_mr_size = qattr->max_mr_size;
157 attr->page_size_cap = qattr->page_size_caps;
158 attr->vendor_id = qattr->vendor_id;
159 attr->vendor_part_id = qattr->vendor_part_id;
160 attr->hw_ver = qattr->hw_ver;
161 attr->max_qp = qattr->max_qp;
162 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
163 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
164 IB_DEVICE_RC_RNR_NAK_GEN |
165 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
166
167 attr->max_sge = qattr->max_sge;
168 attr->max_sge_rd = qattr->max_sge;
169 attr->max_cq = qattr->max_cq;
170 attr->max_cqe = qattr->max_cqe;
171 attr->max_mr = qattr->max_mr;
172 attr->max_mw = qattr->max_mw;
173 attr->max_pd = qattr->max_pd;
174 attr->atomic_cap = dev->atomic_cap;
175 attr->max_fmr = qattr->max_fmr;
176 attr->max_map_per_fmr = 16;
177 attr->max_qp_init_rd_atom =
178 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
179 attr->max_qp_rd_atom =
180 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
181 attr->max_qp_init_rd_atom);
182
183 attr->max_srq = qattr->max_srq;
184 attr->max_srq_sge = qattr->max_srq_sge;
185 attr->max_srq_wr = qattr->max_srq_wr;
186
187 attr->local_ca_ack_delay = qattr->dev_ack_delay;
188 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
189 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
190 attr->max_ah = qattr->max_ah;
191
192 return 0;
193}
194
195#define QEDR_SPEED_SDR (1)
196#define QEDR_SPEED_DDR (2)
197#define QEDR_SPEED_QDR (4)
198#define QEDR_SPEED_FDR10 (8)
199#define QEDR_SPEED_FDR (16)
200#define QEDR_SPEED_EDR (32)
201
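/* Translate an Ethernet link speed in Mbps into the IB speed/width pair
 * reported via ib_port_attr, e.g. 25000 -> EDR x1, 100000 -> EDR x4.
 * Unknown speeds fall back to SDR x1.
 */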
202static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
203 u8 *ib_width)
204{
205 switch (speed) {
206 case 1000:
207 *ib_speed = QEDR_SPEED_SDR;
208 *ib_width = IB_WIDTH_1X;
209 break;
210 case 10000:
211 *ib_speed = QEDR_SPEED_QDR;
212 *ib_width = IB_WIDTH_1X;
213 break;
214
215 case 20000:
216 *ib_speed = QEDR_SPEED_DDR;
217 *ib_width = IB_WIDTH_4X;
218 break;
219
220 case 25000:
221 *ib_speed = QEDR_SPEED_EDR;
222 *ib_width = IB_WIDTH_1X;
223 break;
224
225 case 40000:
226 *ib_speed = QEDR_SPEED_QDR;
227 *ib_width = IB_WIDTH_4X;
228 break;
229
230 case 50000:
231 *ib_speed = QEDR_SPEED_QDR;
232 *ib_width = IB_WIDTH_4X;
233 break;
234
235 case 100000:
236 *ib_speed = QEDR_SPEED_EDR;
237 *ib_width = IB_WIDTH_4X;
238 break;
239
240 default:
241 /* Unsupported */
242 *ib_speed = QEDR_SPEED_SDR;
243 *ib_width = IB_WIDTH_1X;
244 }
245}
246
247int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
248{
249 struct qedr_dev *dev;
250 struct qed_rdma_port *rdma_port;
251
252 dev = get_qedr_dev(ibdev);
253 if (port > 1) {
254 DP_ERR(dev, "invalid_port=0x%x\n", port);
255 return -EINVAL;
256 }
257
258 if (!dev->rdma_ctx) {
259 DP_ERR(dev, "rdma_ctx is NULL\n");
260 return -EINVAL;
261 }
262
263 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
Ram Amraniac1b36e2016-10-10 13:15:32 +0300264
Or Gerlitzc4550c62017-01-24 13:02:39 +0200265	/* *attr is zeroed by the caller; avoid zeroing it here */
Ram Amraniac1b36e2016-10-10 13:15:32 +0300266 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
267 attr->state = IB_PORT_ACTIVE;
268 attr->phys_state = 5;
269 } else {
270 attr->state = IB_PORT_DOWN;
271 attr->phys_state = 3;
272 }
273 attr->max_mtu = IB_MTU_4096;
274 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
275 attr->lid = 0;
276 attr->lmc = 0;
277 attr->sm_lid = 0;
278 attr->sm_sl = 0;
279 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
280 attr->gid_tbl_len = QEDR_MAX_SGID;
281 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
282 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
283 attr->qkey_viol_cntr = 0;
284 get_link_speed_and_width(rdma_port->link_speed,
285 &attr->active_speed, &attr->active_width);
286 attr->max_msg_sz = rdma_port->max_msg_size;
287 attr->max_vl_num = 4;
288
289 return 0;
290}
291
292int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
293 struct ib_port_modify *props)
294{
295 struct qedr_dev *dev;
296
297 dev = get_qedr_dev(ibdev);
298 if (port > 1) {
299 DP_ERR(dev, "invalid_port=0x%x\n", port);
300 return -EINVAL;
301 }
302
303 return 0;
304}
305
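/* Remember a physical range (doorbells or queue memory) handed to user
 * space so that a later mmap() of that range can be validated in
 * qedr_mmap().
 */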
306static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
307 unsigned long len)
308{
309 struct qedr_mm *mm;
310
311 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
312 if (!mm)
313 return -ENOMEM;
314
315 mm->key.phy_addr = phy_addr;
316 /* This function might be called with a length which is not a multiple
317 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
318 * forces this granularity by increasing the requested size if needed.
319 * When qedr_mmap is called, it will search the list with the updated
320 * length as a key. To prevent search failures, the length is rounded up
321 * in advance to PAGE_SIZE.
322 */
323 mm->key.len = roundup(len, PAGE_SIZE);
324 INIT_LIST_HEAD(&mm->entry);
325
326 mutex_lock(&uctx->mm_list_lock);
327 list_add(&mm->entry, &uctx->mm_head);
328 mutex_unlock(&uctx->mm_list_lock);
329
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
332 (unsigned long long)mm->key.phy_addr,
333 (unsigned long)mm->key.len, uctx);
334
335 return 0;
336}
337
338static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
339 unsigned long len)
340{
341 bool found = false;
342 struct qedr_mm *mm;
343
344 mutex_lock(&uctx->mm_list_lock);
345 list_for_each_entry(mm, &uctx->mm_head, entry) {
346 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
347 continue;
348
349 found = true;
350 break;
351 }
352 mutex_unlock(&uctx->mm_list_lock);
353 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
354 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
355 mm->key.phy_addr, mm->key.len, uctx, found);
356
357 return found;
358}
359
360struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
361 struct ib_udata *udata)
362{
363 int rc;
364 struct qedr_ucontext *ctx;
365 struct qedr_alloc_ucontext_resp uresp;
366 struct qedr_dev *dev = get_qedr_dev(ibdev);
367 struct qed_rdma_add_user_out_params oparams;
368
369 if (!udata)
370 return ERR_PTR(-EFAULT);
371
372 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
373 if (!ctx)
374 return ERR_PTR(-ENOMEM);
375
376 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
377 if (rc) {
378 DP_ERR(dev,
 379		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
380 rc);
381 goto err;
382 }
383
384 ctx->dpi = oparams.dpi;
385 ctx->dpi_addr = oparams.dpi_addr;
386 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
387 ctx->dpi_size = oparams.dpi_size;
388 INIT_LIST_HEAD(&ctx->mm_head);
389 mutex_init(&ctx->mm_list_lock);
390
391 memset(&uresp, 0, sizeof(uresp));
392
393 uresp.db_pa = ctx->dpi_phys_addr;
394 uresp.db_size = ctx->dpi_size;
395 uresp.max_send_wr = dev->attr.max_sqe;
396 uresp.max_recv_wr = dev->attr.max_rqe;
397 uresp.max_srq_wr = dev->attr.max_srq_wr;
398 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
399 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
400 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
401 uresp.max_cqes = QEDR_MAX_CQES;
402
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300403 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amraniac1b36e2016-10-10 13:15:32 +0300404 if (rc)
405 goto err;
406
407 ctx->dev = dev;
408
409 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
410 if (rc)
411 goto err;
412
413 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
414 &ctx->ibucontext);
415 return &ctx->ibucontext;
416
417err:
418 kfree(ctx);
419 return ERR_PTR(rc);
420}
421
422int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
423{
424 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
425 struct qedr_mm *mm, *tmp;
426 int status = 0;
427
428 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
429 uctx);
430 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
431
432 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
433 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
434 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
435 mm->key.phy_addr, mm->key.len, uctx);
436 list_del(&mm->entry);
437 kfree(mm);
438 }
439
440 kfree(uctx);
441 return status;
442}
443
444int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
445{
446 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
447 struct qedr_dev *dev = get_qedr_dev(context->device);
448 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
449 u64 unmapped_db = dev->db_phys_addr;
450 unsigned long len = (vma->vm_end - vma->vm_start);
451 int rc = 0;
452 bool found;
453
454 DP_DEBUG(dev, QEDR_MSG_INIT,
455 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
456 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
457 if (vma->vm_start & (PAGE_SIZE - 1)) {
458 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
459 vma->vm_start);
460 return -EINVAL;
461 }
462
463 found = qedr_search_mmap(ucontext, vm_page, len);
464 if (!found) {
465 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
466 vma->vm_pgoff);
467 return -EINVAL;
468 }
469
470 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
471
472 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
473 dev->db_size))) {
474 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
475 if (vma->vm_flags & VM_READ) {
476 DP_ERR(dev, "Trying to map doorbell bar for read\n");
477 return -EPERM;
478 }
479
480 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
481
482 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
483 PAGE_SIZE, vma->vm_page_prot);
484 } else {
485 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
486 rc = remap_pfn_range(vma, vma->vm_start,
487 vma->vm_pgoff, len, vma->vm_page_prot);
488 }
489 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
490 return rc;
491}
Ram Amrania7efd772016-10-10 13:15:33 +0300492
493struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
494 struct ib_ucontext *context, struct ib_udata *udata)
495{
496 struct qedr_dev *dev = get_qedr_dev(ibdev);
Ram Amrania7efd772016-10-10 13:15:33 +0300497 struct qedr_pd *pd;
498 u16 pd_id;
499 int rc;
500
501 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
502 (udata && context) ? "User Lib" : "Kernel");
503
504 if (!dev->rdma_ctx) {
 505		DP_ERR(dev, "invalid RDMA context\n");
506 return ERR_PTR(-EINVAL);
507 }
508
509 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
510 if (!pd)
511 return ERR_PTR(-ENOMEM);
512
Ram Amrani9c1e0222017-01-24 13:51:42 +0200513 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
514 if (rc)
515 goto err;
Ram Amrania7efd772016-10-10 13:15:33 +0300516
Ram Amrania7efd772016-10-10 13:15:33 +0300517 pd->pd_id = pd_id;
518
519 if (udata && context) {
Ram Amrani9c1e0222017-01-24 13:51:42 +0200520 struct qedr_alloc_pd_uresp uresp;
521
522 uresp.pd_id = pd_id;
523
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300524 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrani9c1e0222017-01-24 13:51:42 +0200525 if (rc) {
Ram Amrania7efd772016-10-10 13:15:33 +0300526 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
Ram Amrani9c1e0222017-01-24 13:51:42 +0200527 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
528 goto err;
529 }
530
531 pd->uctx = get_qedr_ucontext(context);
532 pd->uctx->pd = pd;
Ram Amrania7efd772016-10-10 13:15:33 +0300533 }
534
535 return &pd->ibpd;
Ram Amrani9c1e0222017-01-24 13:51:42 +0200536
537err:
538 kfree(pd);
539 return ERR_PTR(rc);
Ram Amrania7efd772016-10-10 13:15:33 +0300540}
541
542int qedr_dealloc_pd(struct ib_pd *ibpd)
543{
544 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
545 struct qedr_pd *pd = get_qedr_pd(ibpd);
546
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100547 if (!pd) {
Ram Amrania7efd772016-10-10 13:15:33 +0300548 pr_err("Invalid PD received in dealloc_pd\n");
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100549 return -EINVAL;
550 }
Ram Amrania7efd772016-10-10 13:15:33 +0300551
552 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
553 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
554
555 kfree(pd);
556
557 return 0;
558}
559
560static void qedr_free_pbl(struct qedr_dev *dev,
561 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
562{
563 struct pci_dev *pdev = dev->pdev;
564 int i;
565
566 for (i = 0; i < pbl_info->num_pbls; i++) {
567 if (!pbl[i].va)
568 continue;
569 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
570 pbl[i].va, pbl[i].pa);
571 }
572
573 kfree(pbl);
574}
575
576#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
577#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
578
579#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
580#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
581#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
582
583static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
584 struct qedr_pbl_info *pbl_info,
585 gfp_t flags)
586{
587 struct pci_dev *pdev = dev->pdev;
588 struct qedr_pbl *pbl_table;
589 dma_addr_t *pbl_main_tbl;
590 dma_addr_t pa;
591 void *va;
592 int i;
593
594 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
595 if (!pbl_table)
596 return ERR_PTR(-ENOMEM);
597
598 for (i = 0; i < pbl_info->num_pbls; i++) {
599 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
600 &pa, flags);
601 if (!va)
602 goto err;
603
604 memset(va, 0, pbl_info->pbl_size);
605 pbl_table[i].va = va;
606 pbl_table[i].pa = pa;
607 }
608
 609	/* Two-layer PBLs: if we have more than one pbl, we need to initialize
 610	 * the first one with physical pointers to all of the rest
611 */
612 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
613 for (i = 0; i < pbl_info->num_pbls - 1; i++)
614 pbl_main_tbl[i] = pbl_table[i + 1].pa;
615
616 return pbl_table;
617
618err:
619 for (i--; i >= 0; i--)
620 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
621 pbl_table[i].va, pbl_table[i].pa);
622
623 qedr_free_pbl(dev, pbl_info, pbl_table);
624
625 return ERR_PTR(-ENOMEM);
626}
627
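/* Compute the PBL geometry (page size, number of PBL pages, one or two
 * layers) needed to describe num_pbes page entries to the FW.
 */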
628static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
629 struct qedr_pbl_info *pbl_info,
630 u32 num_pbes, int two_layer_capable)
631{
632 u32 pbl_capacity;
633 u32 pbl_size;
634 u32 num_pbls;
635
636 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
637 if (num_pbes > MAX_PBES_TWO_LAYER) {
638 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
639 num_pbes);
640 return -EINVAL;
641 }
642
643 /* calculate required pbl page size */
644 pbl_size = MIN_FW_PBL_PAGE_SIZE;
645 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
646 NUM_PBES_ON_PAGE(pbl_size);
647
648 while (pbl_capacity < num_pbes) {
649 pbl_size *= 2;
650 pbl_capacity = pbl_size / sizeof(u64);
651 pbl_capacity = pbl_capacity * pbl_capacity;
652 }
653
654 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
 655		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
 656		num_pbls++;	/* One for the layer0 (points to the pbls) */
656 pbl_info->two_layered = true;
657 } else {
658 /* One layered PBL */
659 num_pbls = 1;
660 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
661 roundup_pow_of_two((num_pbes * sizeof(u64))));
662 pbl_info->two_layered = false;
663 }
664
665 pbl_info->num_pbls = num_pbls;
666 pbl_info->pbl_size = pbl_size;
667 pbl_info->num_pbes = num_pbes;
668
669 DP_DEBUG(dev, QEDR_MSG_MR,
670 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
671 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
672
673 return 0;
674}
675
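/* Walk the umem scatter-gather list and write one PBE per FW page into
 * the PBL(s). A single umem page may map to several FW pages when the
 * umem page shift is larger than pg_shift.
 */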
676static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
677 struct qedr_pbl *pbl,
Ram Amranie57bb6b2017-06-05 16:32:27 +0300678 struct qedr_pbl_info *pbl_info, u32 pg_shift)
Ram Amrania7efd772016-10-10 13:15:33 +0300679{
680 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300681 u32 fw_pg_cnt, fw_pg_per_umem_pg;
Ram Amrania7efd772016-10-10 13:15:33 +0300682 struct qedr_pbl *pbl_tbl;
683 struct scatterlist *sg;
684 struct regpair *pbe;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300685 u64 pg_addr;
Ram Amrania7efd772016-10-10 13:15:33 +0300686 int entry;
Ram Amrania7efd772016-10-10 13:15:33 +0300687
688 if (!pbl_info->num_pbes)
689 return;
690
 691	/* If we have a two-layered pbl, the first pbl points to the rest
 692	 * of the pbls and the first entry lies in the second pbl in the table
693 */
694 if (pbl_info->two_layered)
695 pbl_tbl = &pbl[1];
696 else
697 pbl_tbl = pbl;
698
699 pbe = (struct regpair *)pbl_tbl->va;
700 if (!pbe) {
701 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
702 return;
703 }
704
705 pbe_cnt = 0;
706
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +0300707 shift = umem->page_shift;
Ram Amrania7efd772016-10-10 13:15:33 +0300708
Ram Amranie57bb6b2017-06-05 16:32:27 +0300709 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
710
Ram Amrania7efd772016-10-10 13:15:33 +0300711 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
712 pages = sg_dma_len(sg) >> shift;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300713 pg_addr = sg_dma_address(sg);
Ram Amrania7efd772016-10-10 13:15:33 +0300714 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
Ram Amranie57bb6b2017-06-05 16:32:27 +0300715 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
716 pbe->lo = cpu_to_le32(pg_addr);
717 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
Ram Amrania7efd772016-10-10 13:15:33 +0300718
Ram Amranie57bb6b2017-06-05 16:32:27 +0300719 pg_addr += BIT(pg_shift);
720 pbe_cnt++;
721 total_num_pbes++;
722 pbe++;
Ram Amrania7efd772016-10-10 13:15:33 +0300723
Ram Amranie57bb6b2017-06-05 16:32:27 +0300724 if (total_num_pbes == pbl_info->num_pbes)
725 return;
726
 727				/* If the given pbl has no more room for pbes,
 728				 * move to the next pbl.
729 */
730 if (pbe_cnt ==
731 (pbl_info->pbl_size / sizeof(u64))) {
732 pbl_tbl++;
733 pbe = (struct regpair *)pbl_tbl->va;
734 pbe_cnt = 0;
735 }
736
737 fw_pg_cnt++;
Ram Amrania7efd772016-10-10 13:15:33 +0300738 }
739 }
740 }
741}
742
743static int qedr_copy_cq_uresp(struct qedr_dev *dev,
744 struct qedr_cq *cq, struct ib_udata *udata)
745{
746 struct qedr_create_cq_uresp uresp;
747 int rc;
748
749 memset(&uresp, 0, sizeof(uresp));
750
751 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
752 uresp.icid = cq->icid;
753
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300754 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrania7efd772016-10-10 13:15:33 +0300755 if (rc)
756 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
757
758 return rc;
759}
760
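/* Advance latest_cqe to the next CQE in the chain, flipping the expected
 * toggle bit when the last element is reached (chain wrap).
 */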
761static void consume_cqe(struct qedr_cq *cq)
762{
763 if (cq->latest_cqe == cq->toggle_cqe)
764 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
765
766 cq->latest_cqe = qed_chain_consume(&cq->pbl);
767}
768
769static inline int qedr_align_cq_entries(int entries)
770{
771 u64 size, aligned_size;
772
773 /* We allocate an extra entry that we don't report to the FW. */
774 size = (entries + 1) * QEDR_CQE_SIZE;
775 aligned_size = ALIGN(size, PAGE_SIZE);
776
777 return aligned_size / QEDR_CQE_SIZE;
778}
779
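/* Pin a user buffer with ib_umem_get() and build the PBL that describes
 * it to the FW at FW_PAGE_SHIFT granularity.
 */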
780static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
781 struct qedr_dev *dev,
782 struct qedr_userq *q,
783 u64 buf_addr, size_t buf_len,
784 int access, int dmasync)
785{
Ram Amranie57bb6b2017-06-05 16:32:27 +0300786 u32 fw_pages;
Ram Amrania7efd772016-10-10 13:15:33 +0300787 int rc;
788
789 q->buf_addr = buf_addr;
790 q->buf_len = buf_len;
791 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
792 if (IS_ERR(q->umem)) {
793 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
794 PTR_ERR(q->umem));
795 return PTR_ERR(q->umem);
796 }
797
Ram Amranie57bb6b2017-06-05 16:32:27 +0300798 fw_pages = ib_umem_page_count(q->umem) <<
799 (q->umem->page_shift - FW_PAGE_SHIFT);
800
801 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
Ram Amrania7efd772016-10-10 13:15:33 +0300802 if (rc)
803 goto err0;
804
805 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +0100806 if (IS_ERR(q->pbl_tbl)) {
807 rc = PTR_ERR(q->pbl_tbl);
Ram Amrania7efd772016-10-10 13:15:33 +0300808 goto err0;
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +0100809 }
Ram Amrania7efd772016-10-10 13:15:33 +0300810
Ram Amranie57bb6b2017-06-05 16:32:27 +0300811 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
812 FW_PAGE_SHIFT);
Ram Amrania7efd772016-10-10 13:15:33 +0300813
814 return 0;
815
816err0:
817 ib_umem_release(q->umem);
818
819 return rc;
820}
821
822static inline void qedr_init_cq_params(struct qedr_cq *cq,
823 struct qedr_ucontext *ctx,
824 struct qedr_dev *dev, int vector,
825 int chain_entries, int page_cnt,
826 u64 pbl_ptr,
827 struct qed_rdma_create_cq_in_params
828 *params)
829{
830 memset(params, 0, sizeof(*params));
831 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
832 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
833 params->cnq_id = vector;
834 params->cq_size = chain_entries - 1;
835 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
836 params->pbl_num_pages = page_cnt;
837 params->pbl_ptr = pbl_ptr;
838 params->pbl_two_level = 0;
839}
840
841static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
842{
843 /* Flush data before signalling doorbell */
844 wmb();
845 cq->db.data.agg_flags = flags;
846 cq->db.data.value = cpu_to_le32(cons);
847 writeq(cq->db.raw, cq->db_addr);
848
849 /* Make sure write would stick */
850 mmiowb();
851}
852
853int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
854{
855 struct qedr_cq *cq = get_qedr_cq(ibcq);
856 unsigned long sflags;
Amrani, Ram4dd72632017-04-27 13:35:34 +0300857 struct qedr_dev *dev;
858
859 dev = get_qedr_dev(ibcq->device);
860
861 if (cq->destroyed) {
862 DP_ERR(dev,
863 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
864 cq, cq->icid);
865 return -EINVAL;
866 }
867
Ram Amrania7efd772016-10-10 13:15:33 +0300868
869 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
870 return 0;
871
872 spin_lock_irqsave(&cq->cq_lock, sflags);
873
874 cq->arm_flags = 0;
875
876 if (flags & IB_CQ_SOLICITED)
877 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
878
879 if (flags & IB_CQ_NEXT_COMP)
880 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
881
882 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
883
884 spin_unlock_irqrestore(&cq->cq_lock, sflags);
885
886 return 0;
887}
888
889struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
890 const struct ib_cq_init_attr *attr,
891 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
892{
893 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
894 struct qed_rdma_destroy_cq_out_params destroy_oparams;
895 struct qed_rdma_destroy_cq_in_params destroy_iparams;
896 struct qedr_dev *dev = get_qedr_dev(ibdev);
897 struct qed_rdma_create_cq_in_params params;
898 struct qedr_create_cq_ureq ureq;
899 int vector = attr->comp_vector;
900 int entries = attr->cqe;
901 struct qedr_cq *cq;
902 int chain_entries;
903 int page_cnt;
904 u64 pbl_ptr;
905 u16 icid;
906 int rc;
907
908 DP_DEBUG(dev, QEDR_MSG_INIT,
909 "create_cq: called from %s. entries=%d, vector=%d\n",
910 udata ? "User Lib" : "Kernel", entries, vector);
911
912 if (entries > QEDR_MAX_CQES) {
913 DP_ERR(dev,
914 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
915 entries, QEDR_MAX_CQES);
916 return ERR_PTR(-EINVAL);
917 }
918
919 chain_entries = qedr_align_cq_entries(entries);
920 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
921
922 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
923 if (!cq)
924 return ERR_PTR(-ENOMEM);
925
926 if (udata) {
927 memset(&ureq, 0, sizeof(ureq));
928 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
929 DP_ERR(dev,
930 "create cq: problem copying data from user space\n");
931 goto err0;
932 }
933
934 if (!ureq.len) {
935 DP_ERR(dev,
936 "create cq: cannot create a cq with 0 entries\n");
937 goto err0;
938 }
939
940 cq->cq_type = QEDR_CQ_TYPE_USER;
941
942 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
943 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
944 if (rc)
945 goto err0;
946
947 pbl_ptr = cq->q.pbl_tbl->pa;
948 page_cnt = cq->q.pbl_info.num_pbes;
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200949
950 cq->ibcq.cqe = chain_entries;
Ram Amrania7efd772016-10-10 13:15:33 +0300951 } else {
952 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
953
954 rc = dev->ops->common->chain_alloc(dev->cdev,
955 QED_CHAIN_USE_TO_CONSUME,
956 QED_CHAIN_MODE_PBL,
957 QED_CHAIN_CNT_TYPE_U32,
958 chain_entries,
959 sizeof(union rdma_cqe),
Mintz, Yuval1a4a6972017-06-20 16:00:00 +0300960 &cq->pbl, NULL);
Ram Amrania7efd772016-10-10 13:15:33 +0300961 if (rc)
962 goto err1;
963
964 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
965 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200966 cq->ibcq.cqe = cq->pbl.capacity;
Ram Amrania7efd772016-10-10 13:15:33 +0300967 }
968
969 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
970 pbl_ptr, &params);
971
972 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
973 if (rc)
974 goto err2;
975
976 cq->icid = icid;
977 cq->sig = QEDR_CQ_MAGIC_NUMBER;
978 spin_lock_init(&cq->cq_lock);
979
980 if (ib_ctx) {
981 rc = qedr_copy_cq_uresp(dev, cq, udata);
982 if (rc)
983 goto err3;
984 } else {
985 /* Generate doorbell address. */
986 cq->db_addr = dev->db_addr +
987 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
988 cq->db.data.icid = cq->icid;
989 cq->db.data.params = DB_AGG_CMD_SET <<
990 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
991
992 /* point to the very last element, passing it we will toggle */
993 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
994 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
995 cq->latest_cqe = NULL;
996 consume_cqe(cq);
997 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
998 }
999
1000 DP_DEBUG(dev, QEDR_MSG_CQ,
1001 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1002 cq->icid, cq, params.cq_size);
1003
1004 return &cq->ibcq;
1005
1006err3:
1007 destroy_iparams.icid = cq->icid;
1008 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1009 &destroy_oparams);
1010err2:
1011 if (udata)
1012 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1013 else
1014 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1015err1:
1016 if (udata)
1017 ib_umem_release(cq->q.umem);
1018err0:
1019 kfree(cq);
1020 return ERR_PTR(-EINVAL);
1021}
1022
1023int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1024{
1025 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1026 struct qedr_cq *cq = get_qedr_cq(ibcq);
1027
1028 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1029
1030 return 0;
1031}
1032
Amrani, Ram4dd72632017-04-27 13:35:34 +03001033#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1034#define QEDR_DESTROY_CQ_ITER_DURATION (10)
1035
Ram Amrania7efd772016-10-10 13:15:33 +03001036int qedr_destroy_cq(struct ib_cq *ibcq)
1037{
1038 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1039 struct qed_rdma_destroy_cq_out_params oparams;
1040 struct qed_rdma_destroy_cq_in_params iparams;
1041 struct qedr_cq *cq = get_qedr_cq(ibcq);
Amrani, Ram4dd72632017-04-27 13:35:34 +03001042 int iter;
Amrani, Ram942b3b22017-04-27 13:35:33 +03001043 int rc;
Ram Amrania7efd772016-10-10 13:15:33 +03001044
Amrani, Ram942b3b22017-04-27 13:35:33 +03001045 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
Ram Amrania7efd772016-10-10 13:15:33 +03001046
Amrani, Ram4dd72632017-04-27 13:35:34 +03001047 cq->destroyed = 1;
1048
Ram Amrania7efd772016-10-10 13:15:33 +03001049 /* GSIs CQs are handled by driver, so they don't exist in the FW */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001050 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1051 goto done;
Amrani, Rama1211352016-12-22 14:40:34 +02001052
Amrani, Ram942b3b22017-04-27 13:35:33 +03001053 iparams.icid = cq->icid;
1054 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1055 if (rc)
1056 return rc;
1057
1058 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
Ram Amrania7efd772016-10-10 13:15:33 +03001059
1060 if (ibcq->uobject && ibcq->uobject->context) {
1061 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1062 ib_umem_release(cq->q.umem);
1063 }
1064
Amrani, Ram4dd72632017-04-27 13:35:34 +03001065 /* We don't want the IRQ handler to handle a non-existing CQ so we
1066 * wait until all CNQ interrupts, if any, are received. This will always
1067 * happen and will always happen very fast. If not, then a serious error
 1068	 * has occurred. That is why we can use a long delay.
 1069	 * We spin for a short time so we don't lose time on context switching
1070 * in case all the completions are handled in that span. Otherwise
1071 * we sleep for a while and check again. Since the CNQ may be
1072 * associated with (only) the current CPU we use msleep to allow the
1073 * current CPU to be freed.
1074 * The CNQ notification is increased in qedr_irq_handler().
1075 */
1076 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1077 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1078 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1079 iter--;
1080 }
1081
1082 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1083 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1084 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1085 iter--;
1086 }
1087
1088 if (oparams.num_cq_notif != cq->cnq_notif)
1089 goto err;
1090
1091 /* Note that we don't need to have explicit code to wait for the
1092 * completion of the event handler because it is invoked from the EQ.
1093 * Since the destroy CQ ramrod has also been received on the EQ we can
1094 * be certain that there's no event handler in process.
1095 */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001096done:
Amrani, Ram4dd72632017-04-27 13:35:34 +03001097 cq->sig = ~cq->sig;
1098
Ram Amrania7efd772016-10-10 13:15:33 +03001099 kfree(cq);
1100
1101 return 0;
Amrani, Ram4dd72632017-04-27 13:35:34 +03001102
1103err:
1104 DP_ERR(dev,
1105 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1106 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1107
1108 return -EINVAL;
Ram Amrania7efd772016-10-10 13:15:33 +03001109}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001110
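/* Resolve the source GID entry for a modify-QP request and fill in the
 * SGID/DGID, VLAN id and RoCE mode (v1, v2/IPv4 or v2/IPv6) accordingly.
 */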
1111static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1112 struct ib_qp_attr *attr,
1113 int attr_mask,
1114 struct qed_rdma_modify_qp_in_params
1115 *qp_params)
1116{
1117 enum rdma_network_type nw_type;
1118 struct ib_gid_attr gid_attr;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001119 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001120 union ib_gid gid;
1121 u32 ipv4_addr;
1122 int rc = 0;
1123 int i;
1124
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001125 rc = ib_get_cached_gid(ibqp->device,
1126 rdma_ah_get_port_num(&attr->ah_attr),
1127 grh->sgid_index, &gid, &gid_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001128 if (rc)
1129 return rc;
1130
1131 if (!memcmp(&gid, &zgid, sizeof(gid)))
1132 return -ENOENT;
1133
1134 if (gid_attr.ndev) {
1135 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1136
1137 dev_put(gid_attr.ndev);
1138 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1139 switch (nw_type) {
1140 case RDMA_NETWORK_IPV6:
1141 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1142 sizeof(qp_params->sgid));
1143 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001144 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001145 sizeof(qp_params->dgid));
1146 qp_params->roce_mode = ROCE_V2_IPV6;
1147 SET_FIELD(qp_params->modify_flags,
1148 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1149 break;
1150 case RDMA_NETWORK_IB:
1151 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1152 sizeof(qp_params->sgid));
1153 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001154 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001155 sizeof(qp_params->dgid));
1156 qp_params->roce_mode = ROCE_V1;
1157 break;
1158 case RDMA_NETWORK_IPV4:
1159 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1160 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1161 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1162 qp_params->sgid.ipv4_addr = ipv4_addr;
1163 ipv4_addr =
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001164 qedr_get_ipv4_from_gid(grh->dgid.raw);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001165 qp_params->dgid.ipv4_addr = ipv4_addr;
1166 SET_FIELD(qp_params->modify_flags,
1167 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1168 qp_params->roce_mode = ROCE_V2_IPV4;
1169 break;
1170 }
1171 }
1172
1173 for (i = 0; i < 4; i++) {
1174 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1175 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1176 }
1177
1178 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1179 qp_params->vlan_id = 0;
1180
1181 return 0;
1182}
1183
Ram Amranicecbcdd2016-10-10 13:15:34 +03001184static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1185 struct ib_qp_init_attr *attrs)
1186{
1187 struct qedr_device_attr *qattr = &dev->attr;
1188
1189 /* QP0... attrs->qp_type == IB_QPT_GSI */
1190 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1191 DP_DEBUG(dev, QEDR_MSG_QP,
1192 "create qp: unsupported qp type=0x%x requested\n",
1193 attrs->qp_type);
1194 return -EINVAL;
1195 }
1196
1197 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1198 DP_ERR(dev,
1199 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1200 attrs->cap.max_send_wr, qattr->max_sqe);
1201 return -EINVAL;
1202 }
1203
1204 if (attrs->cap.max_inline_data > qattr->max_inline) {
1205 DP_ERR(dev,
1206 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1207 attrs->cap.max_inline_data, qattr->max_inline);
1208 return -EINVAL;
1209 }
1210
1211 if (attrs->cap.max_send_sge > qattr->max_sge) {
1212 DP_ERR(dev,
1213 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1214 attrs->cap.max_send_sge, qattr->max_sge);
1215 return -EINVAL;
1216 }
1217
1218 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1219 DP_ERR(dev,
1220 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1221 attrs->cap.max_recv_sge, qattr->max_sge);
1222 return -EINVAL;
1223 }
1224
1225 /* Unprivileged user space cannot create special QP */
1226 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1227 DP_ERR(dev,
1228 "create qp: userspace can't create special QPs of type=0x%x\n",
1229 attrs->qp_type);
1230 return -EINVAL;
1231 }
1232
1233 return 0;
1234}
1235
1236static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1237 struct qedr_qp *qp)
1238{
1239 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1240 uresp->rq_icid = qp->icid;
1241}
1242
1243static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1244 struct qedr_qp *qp)
1245{
1246 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1247 uresp->sq_icid = qp->icid + 1;
1248}
1249
1250static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1251 struct qedr_qp *qp, struct ib_udata *udata)
1252{
1253 struct qedr_create_qp_uresp uresp;
1254 int rc;
1255
1256 memset(&uresp, 0, sizeof(uresp));
1257 qedr_copy_sq_uresp(&uresp, qp);
1258 qedr_copy_rq_uresp(&uresp, qp);
1259
1260 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1261 uresp.qp_id = qp->qp_id;
1262
Amrani, Ramc75d3ec2017-06-26 19:05:04 +03001263 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amranicecbcdd2016-10-10 13:15:34 +03001264 if (rc)
1265 DP_ERR(dev,
1266 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1267 qp->icid);
1268
1269 return rc;
1270}
1271
Amrani, Ramdf158562016-12-22 14:52:24 +02001272static void qedr_set_common_qp_params(struct qedr_dev *dev,
1273 struct qedr_qp *qp,
1274 struct qedr_pd *pd,
1275 struct ib_qp_init_attr *attrs)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001276{
Ram Amranicecbcdd2016-10-10 13:15:34 +03001277 spin_lock_init(&qp->q_lock);
Amrani, Ramdf158562016-12-22 14:52:24 +02001278 qp->pd = pd;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001279 qp->qp_type = attrs->qp_type;
1280 qp->max_inline_data = attrs->cap.max_inline_data;
1281 qp->sq.max_sges = attrs->cap.max_send_sge;
1282 qp->state = QED_ROCE_QP_STATE_RESET;
1283 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1284 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1285 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1286 qp->dev = dev;
Amrani, Ramdf158562016-12-22 14:52:24 +02001287 qp->rq.max_sges = attrs->cap.max_recv_sge;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001288
1289 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001290 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1291 qp->rq.max_sges, qp->rq_cq->icid);
1292 DP_DEBUG(dev, QEDR_MSG_QP,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001293 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1294 pd->pd_id, qp->qp_type, qp->max_inline_data,
1295 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1296 DP_DEBUG(dev, QEDR_MSG_QP,
1297 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1298 qp->sq.max_sges, qp->sq_cq->icid);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001299}
1300
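/* Derive the SQ and RQ doorbell addresses from the QP icid; the SQ
 * doorbell uses icid + 1, the RQ doorbell uses the icid itself.
 */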
Amrani, Ramdf158562016-12-22 14:52:24 +02001301static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001302{
1303 qp->sq.db = dev->db_addr +
1304 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1305 qp->sq.db_data.data.icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001306 qp->rq.db = dev->db_addr +
1307 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1308 qp->rq.db_data.data.icid = qp->icid;
1309}
1310
Amrani, Ramdf158562016-12-22 14:52:24 +02001311static inline void
1312qedr_init_common_qp_in_params(struct qedr_dev *dev,
1313 struct qedr_pd *pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001314 struct qedr_qp *qp,
1315 struct ib_qp_init_attr *attrs,
Amrani, Ramdf158562016-12-22 14:52:24 +02001316 bool fmr_and_reserved_lkey,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001317 struct qed_rdma_create_qp_in_params *params)
1318{
Amrani, Ramdf158562016-12-22 14:52:24 +02001319 /* QP handle to be written in an async event */
1320 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1321 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001322
Amrani, Ramdf158562016-12-22 14:52:24 +02001323 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1324 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1325 params->pd = pd->pd_id;
1326 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1327 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1328 params->stats_queue = 0;
1329 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1330 params->srq_id = 0;
1331 params->use_srq = false;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001332}
1333
Amrani, Ramdf158562016-12-22 14:52:24 +02001334static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001335{
Amrani, Ramdf158562016-12-22 14:52:24 +02001336 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1337 "qp=%p. "
1338 "sq_addr=0x%llx, "
1339 "sq_len=%zd, "
1340 "rq_addr=0x%llx, "
1341 "rq_len=%zd"
1342 "\n",
1343 qp,
1344 qp->usq.buf_addr,
1345 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1346}
1347
1348static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1349{
1350 if (qp->usq.umem)
1351 ib_umem_release(qp->usq.umem);
1352 qp->usq.umem = NULL;
1353
1354 if (qp->urq.umem)
1355 ib_umem_release(qp->urq.umem);
1356 qp->urq.umem = NULL;
1357}
1358
1359static int qedr_create_user_qp(struct qedr_dev *dev,
1360 struct qedr_qp *qp,
1361 struct ib_pd *ibpd,
1362 struct ib_udata *udata,
1363 struct ib_qp_init_attr *attrs)
1364{
1365 struct qed_rdma_create_qp_in_params in_params;
1366 struct qed_rdma_create_qp_out_params out_params;
1367 struct qedr_pd *pd = get_qedr_pd(ibpd);
1368 struct ib_ucontext *ib_ctx = NULL;
1369 struct qedr_ucontext *ctx = NULL;
1370 struct qedr_create_qp_ureq ureq;
1371 int rc = -EINVAL;
1372
1373 ib_ctx = ibpd->uobject->context;
1374 ctx = get_qedr_ucontext(ib_ctx);
1375
1376 memset(&ureq, 0, sizeof(ureq));
1377 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1378 if (rc) {
1379 DP_ERR(dev, "Problem copying data from user space\n");
1380 return rc;
1381 }
1382
1383 /* SQ - read access only (0), dma sync not required (0) */
1384 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
1385 ureq.sq_len, 0, 0);
1386 if (rc)
1387 return rc;
1388
1389 /* RQ - read access only (0), dma sync not required (0) */
1390 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
1391 ureq.rq_len, 0, 0);
1392
1393 if (rc)
1394 return rc;
1395
1396 memset(&in_params, 0, sizeof(in_params));
1397 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1398 in_params.qp_handle_lo = ureq.qp_handle_lo;
1399 in_params.qp_handle_hi = ureq.qp_handle_hi;
1400 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1401 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1402 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1403 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1404
1405 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1406 &in_params, &out_params);
1407
1408 if (!qp->qed_qp) {
1409 rc = -ENOMEM;
1410 goto err1;
1411 }
1412
1413 qp->qp_id = out_params.qp_id;
1414 qp->icid = out_params.icid;
1415
1416 rc = qedr_copy_qp_uresp(dev, qp, udata);
1417 if (rc)
1418 goto err;
1419
1420 qedr_qp_user_print(dev, qp);
1421
1422 return 0;
1423err:
1424 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1425 if (rc)
1426 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1427
1428err1:
1429 qedr_cleanup_user(dev, qp);
1430 return rc;
1431}
1432
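/* Allocate the PBL-backed SQ and RQ chains and create the QP in the FW
 * on behalf of a kernel consumer.
 */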
1433static int
1434qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1435 struct qedr_qp *qp,
1436 struct qed_rdma_create_qp_in_params *in_params,
1437 u32 n_sq_elems, u32 n_rq_elems)
1438{
1439 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001440 int rc;
1441
Ram Amranicecbcdd2016-10-10 13:15:34 +03001442 rc = dev->ops->common->chain_alloc(dev->cdev,
1443 QED_CHAIN_USE_TO_PRODUCE,
1444 QED_CHAIN_MODE_PBL,
1445 QED_CHAIN_CNT_TYPE_U32,
1446 n_sq_elems,
1447 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001448 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001449
1450 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001451 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001452
Amrani, Ramdf158562016-12-22 14:52:24 +02001453 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1454 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001455
Ram Amranicecbcdd2016-10-10 13:15:34 +03001456 rc = dev->ops->common->chain_alloc(dev->cdev,
1457 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1458 QED_CHAIN_MODE_PBL,
1459 QED_CHAIN_CNT_TYPE_U32,
1460 n_rq_elems,
1461 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001462 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001463 if (rc)
1464 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001465
Amrani, Ramdf158562016-12-22 14:52:24 +02001466 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1467 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001468
Amrani, Ramdf158562016-12-22 14:52:24 +02001469 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1470 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001471
Amrani, Ramdf158562016-12-22 14:52:24 +02001472 if (!qp->qed_qp)
1473 return -EINVAL;
1474
1475 qp->qp_id = out_params.qp_id;
1476 qp->icid = out_params.icid;
1477
1478 qedr_set_roce_db_info(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001479
1480 return 0;
1481}
1482
Amrani, Ramdf158562016-12-22 14:52:24 +02001483static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001484{
Amrani, Ramdf158562016-12-22 14:52:24 +02001485 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1486 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001487
Amrani, Ramdf158562016-12-22 14:52:24 +02001488 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1489 kfree(qp->rqe_wr_id);
1490}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001491
Amrani, Ramdf158562016-12-22 14:52:24 +02001492static int qedr_create_kernel_qp(struct qedr_dev *dev,
1493 struct qedr_qp *qp,
1494 struct ib_pd *ibpd,
1495 struct ib_qp_init_attr *attrs)
1496{
1497 struct qed_rdma_create_qp_in_params in_params;
1498 struct qedr_pd *pd = get_qedr_pd(ibpd);
1499 int rc = -EINVAL;
1500 u32 n_rq_elems;
1501 u32 n_sq_elems;
1502 u32 n_sq_entries;
1503
1504 memset(&in_params, 0, sizeof(in_params));
1505
1506 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1507 * the ring. The ring should allow at least a single WR, even if the
1508 * user requested none, due to allocation issues.
1509 * We should add an extra WR since the prod and cons indices of
1510 * wqe_wr_id are managed in such a way that the WQ is considered full
1511 * when (prod+1)%max_wr==cons. We currently don't do that because we
 1512	 * double the number of entries due to an iSER issue that pushes far more
1513 * WRs than indicated. If we decline its ib_post_send() then we get
1514 * error prints in the dmesg we'd like to avoid.
1515 */
1516 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1517 dev->attr.max_sqe);
1518
1519 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1520 GFP_KERNEL);
1521 if (!qp->wqe_wr_id) {
1522 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1523 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001524 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001525
Amrani, Ramdf158562016-12-22 14:52:24 +02001526 /* QP handle to be written in CQE */
1527 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1528 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001529
Amrani, Ramdf158562016-12-22 14:52:24 +02001530 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
 1531	 * the ring. The ring should allow at least a single WR, even if the
1532 * user requested none, due to allocation issues.
1533 */
1534 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1535
1536 /* Allocate driver internal RQ array */
1537 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1538 GFP_KERNEL);
1539 if (!qp->rqe_wr_id) {
1540 DP_ERR(dev,
1541 "create qp: failed RQ shadow memory allocation\n");
1542 kfree(qp->wqe_wr_id);
1543 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001544 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001545
Amrani, Ramdf158562016-12-22 14:52:24 +02001546 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001547
Amrani, Ramdf158562016-12-22 14:52:24 +02001548 n_sq_entries = attrs->cap.max_send_wr;
1549 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1550 n_sq_entries = max_t(u32, n_sq_entries, 1);
1551 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001552
Amrani, Ramdf158562016-12-22 14:52:24 +02001553 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1554
1555 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1556 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001557 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001558 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001559
1560 return rc;
1561}
1562
1563struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1564 struct ib_qp_init_attr *attrs,
1565 struct ib_udata *udata)
1566{
1567 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001568 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001569 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001570 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001571 int rc = 0;
1572
1573 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1574 udata ? "user library" : "kernel", pd);
1575
1576 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1577 if (rc)
1578 return ERR_PTR(rc);
1579
Wei Yongjun181d8012016-10-28 16:33:47 +00001580 if (attrs->srq)
1581 return ERR_PTR(-EINVAL);
1582
Ram Amranicecbcdd2016-10-10 13:15:34 +03001583 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001584		 "create qp: called from %s, event_handler=%p, pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1585 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001586 get_qedr_cq(attrs->send_cq),
1587 get_qedr_cq(attrs->send_cq)->icid,
1588 get_qedr_cq(attrs->recv_cq),
1589 get_qedr_cq(attrs->recv_cq)->icid);
1590
Amrani, Ramdf158562016-12-22 14:52:24 +02001591 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1592 if (!qp) {
1593 DP_ERR(dev, "create qp: failed allocating memory\n");
1594 return ERR_PTR(-ENOMEM);
1595 }
1596
1597 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001598
Ram Amrani04886772016-10-10 13:15:38 +03001599 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001600 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1601 if (IS_ERR(ibqp))
1602 kfree(qp);
1603 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001604 }
1605
Amrani, Ramdf158562016-12-22 14:52:24 +02001606 if (udata)
1607 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1608 else
1609 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001610
Amrani, Ramdf158562016-12-22 14:52:24 +02001611 if (rc)
1612 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001613
Ram Amranicecbcdd2016-10-10 13:15:34 +03001614 qp->ibqp.qp_num = qp->qp_id;
1615
Ram Amranicecbcdd2016-10-10 13:15:34 +03001616 return &qp->ibqp;
1617
Amrani, Ramdf158562016-12-22 14:52:24 +02001618err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001619 kfree(qp);
1620
1621 return ERR_PTR(-EFAULT);
1622}
1623
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001624static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001625{
1626 switch (qp_state) {
1627 case QED_ROCE_QP_STATE_RESET:
1628 return IB_QPS_RESET;
1629 case QED_ROCE_QP_STATE_INIT:
1630 return IB_QPS_INIT;
1631 case QED_ROCE_QP_STATE_RTR:
1632 return IB_QPS_RTR;
1633 case QED_ROCE_QP_STATE_RTS:
1634 return IB_QPS_RTS;
1635 case QED_ROCE_QP_STATE_SQD:
1636 return IB_QPS_SQD;
1637 case QED_ROCE_QP_STATE_ERR:
1638 return IB_QPS_ERR;
1639 case QED_ROCE_QP_STATE_SQE:
1640 return IB_QPS_SQE;
1641 }
1642 return IB_QPS_ERR;
1643}
1644
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001645static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1646 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001647{
1648 switch (qp_state) {
1649 case IB_QPS_RESET:
1650 return QED_ROCE_QP_STATE_RESET;
1651 case IB_QPS_INIT:
1652 return QED_ROCE_QP_STATE_INIT;
1653 case IB_QPS_RTR:
1654 return QED_ROCE_QP_STATE_RTR;
1655 case IB_QPS_RTS:
1656 return QED_ROCE_QP_STATE_RTS;
1657 case IB_QPS_SQD:
1658 return QED_ROCE_QP_STATE_SQD;
1659 case IB_QPS_ERR:
1660 return QED_ROCE_QP_STATE_ERR;
1661 default:
1662 return QED_ROCE_QP_STATE_ERR;
1663 }
1664}
1665
1666static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1667{
1668 qed_chain_reset(&qph->pbl);
1669 qph->prod = 0;
1670 qph->cons = 0;
1671 qph->wqe_cons = 0;
1672 qph->db_data.data.value = cpu_to_le16(0);
1673}
1674
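/* Validate the requested QP state transition against the RoCE state
 * machine and apply its side effects (e.g. ring the RQ doorbell on
 * INIT->RTR, sanity-check empty rings on ERR->RESET).
 */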
1675static int qedr_update_qp_state(struct qedr_dev *dev,
1676 struct qedr_qp *qp,
1677 enum qed_roce_qp_state new_state)
1678{
1679 int status = 0;
1680
1681 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001682 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001683
1684 switch (qp->state) {
1685 case QED_ROCE_QP_STATE_RESET:
1686 switch (new_state) {
1687 case QED_ROCE_QP_STATE_INIT:
1688 qp->prev_wqe_size = 0;
1689 qedr_reset_qp_hwq_info(&qp->sq);
1690 qedr_reset_qp_hwq_info(&qp->rq);
1691 break;
1692 default:
1693 status = -EINVAL;
1694 break;
1695		}
1696 break;
1697 case QED_ROCE_QP_STATE_INIT:
1698 switch (new_state) {
1699 case QED_ROCE_QP_STATE_RTR:
1700 /* Update doorbell (in case post_recv was
1701 * done before move to RTR)
1702 */
1703 wmb();
1704 writel(qp->rq.db_data.raw, qp->rq.db);
1705 /* Make sure write takes effect */
1706 mmiowb();
1707 break;
1708 case QED_ROCE_QP_STATE_ERR:
1709 break;
1710 default:
1711 /* Invalid state change. */
1712 status = -EINVAL;
1713 break;
1714		}
1715 break;
1716 case QED_ROCE_QP_STATE_RTR:
1717 /* RTR->XXX */
1718 switch (new_state) {
1719 case QED_ROCE_QP_STATE_RTS:
1720 break;
1721 case QED_ROCE_QP_STATE_ERR:
1722 break;
1723 default:
1724 /* Invalid state change. */
1725 status = -EINVAL;
1726 break;
1727		}
1728 break;
1729 case QED_ROCE_QP_STATE_RTS:
1730 /* RTS->XXX */
1731 switch (new_state) {
1732 case QED_ROCE_QP_STATE_SQD:
1733 break;
1734 case QED_ROCE_QP_STATE_ERR:
1735 break;
1736 default:
1737 /* Invalid state change. */
1738 status = -EINVAL;
1739 break;
1740		}
1741 break;
1742 case QED_ROCE_QP_STATE_SQD:
1743 /* SQD->XXX */
1744 switch (new_state) {
1745 case QED_ROCE_QP_STATE_RTS:
1746 case QED_ROCE_QP_STATE_ERR:
1747 break;
1748 default:
1749 /* Invalid state change. */
1750 status = -EINVAL;
1751 break;
1752		}
1753 break;
1754 case QED_ROCE_QP_STATE_ERR:
1755 /* ERR->XXX */
1756 switch (new_state) {
1757 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001758 if ((qp->rq.prod != qp->rq.cons) ||
1759 (qp->sq.prod != qp->sq.cons)) {
1760 DP_NOTICE(dev,
1761 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1762 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1763 qp->sq.cons);
1764 status = -EINVAL;
1765 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001766 break;
1767 default:
1768 status = -EINVAL;
1769 break;
1770		}
1771 break;
1772 default:
1773 status = -EINVAL;
1774 break;
1775	}
1776
1777 return status;
1778}
1779
1780int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1781 int attr_mask, struct ib_udata *udata)
1782{
1783 struct qedr_qp *qp = get_qedr_qp(ibqp);
1784 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1785 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001786 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001787 enum ib_qp_state old_qp_state, new_qp_state;
1788 int rc = 0;
1789
1790 DP_DEBUG(dev, QEDR_MSG_QP,
1791 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1792 attr->qp_state);
1793
1794 old_qp_state = qedr_get_ibqp_state(qp->state);
1795 if (attr_mask & IB_QP_STATE)
1796 new_qp_state = attr->qp_state;
1797 else
1798 new_qp_state = old_qp_state;
1799
1800 if (!ib_modify_qp_is_ok
1801 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1802 IB_LINK_LAYER_ETHERNET)) {
1803 DP_ERR(dev,
1804 "modify qp: invalid attribute mask=0x%x specified for\n"
1805 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1806 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1807 new_qp_state);
1808 rc = -EINVAL;
1809 goto err;
1810 }
1811
1812 /* Translate the masks... */
1813 if (attr_mask & IB_QP_STATE) {
1814 SET_FIELD(qp_params.modify_flags,
1815 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1816 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1817 }
1818
1819 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1820 qp_params.sqd_async = true;
1821
1822 if (attr_mask & IB_QP_PKEY_INDEX) {
1823 SET_FIELD(qp_params.modify_flags,
1824 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1825 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1826 rc = -EINVAL;
1827 goto err;
1828 }
1829
1830 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1831 }
1832
1833 if (attr_mask & IB_QP_QKEY)
1834 qp->qkey = attr->qkey;
1835
1836 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1837 SET_FIELD(qp_params.modify_flags,
1838 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1839 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1840 IB_ACCESS_REMOTE_READ;
1841 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1842 IB_ACCESS_REMOTE_WRITE;
1843 qp_params.incoming_atomic_en = attr->qp_access_flags &
1844 IB_ACCESS_REMOTE_ATOMIC;
1845 }
1846
1847 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1848 if (attr_mask & IB_QP_PATH_MTU) {
1849 if (attr->path_mtu < IB_MTU_256 ||
1850 attr->path_mtu > IB_MTU_4096) {
1851 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1852 rc = -EINVAL;
1853 goto err;
1854 }
1855 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1856 ib_mtu_enum_to_int(iboe_get_mtu
1857 (dev->ndev->mtu)));
1858 }
1859
1860 if (!qp->mtu) {
1861 qp->mtu =
1862 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1863 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1864 }
1865
1866 SET_FIELD(qp_params.modify_flags,
1867 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1868
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001869 qp_params.traffic_class_tos = grh->traffic_class;
1870 qp_params.flow_label = grh->flow_label;
1871 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001872
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001873 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001874
1875 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1876 if (rc) {
1877 DP_ERR(dev,
1878 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001879 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001880 return rc;
1881 }
1882
1883 rc = qedr_get_dmac(dev, &attr->ah_attr,
1884 qp_params.remote_mac_addr);
1885 if (rc)
1886 return rc;
1887
1888 qp_params.use_local_mac = true;
1889 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1890
1891 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1892 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1893 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1894 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1895 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1896 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1897 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1898 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001899
1900 qp_params.mtu = qp->mtu;
1901 qp_params.lb_indication = false;
1902 }
1903
1904 if (!qp_params.mtu) {
1905 /* Stay with current MTU */
1906 if (qp->mtu)
1907 qp_params.mtu = qp->mtu;
1908 else
1909 qp_params.mtu =
1910 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1911 }
1912
1913 if (attr_mask & IB_QP_TIMEOUT) {
1914 SET_FIELD(qp_params.modify_flags,
1915 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1916
1917 qp_params.ack_timeout = attr->timeout;
1918 if (attr->timeout) {
1919 u32 temp;
1920
1921 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1922 /* FW requires [msec] */
1923 qp_params.ack_timeout = temp;
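			/* A worked example of the conversion above, assuming
			 * the IBTA Local ACK Timeout encoding of
			 * 4.096 usec * 2^timeout:
			 *   attr->timeout = 14 -> 4096 * 2^14 / 10^6 = 67 msec
			 *   attr->timeout = 19 -> 4096 * 2^19 / 10^6 = 2147 msec
			 * i.e. the value handed to the FW is in milliseconds.
			 */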
1924 } else {
1925 /* Infinite */
1926 qp_params.ack_timeout = 0;
1927 }
1928 }
1929 if (attr_mask & IB_QP_RETRY_CNT) {
1930 SET_FIELD(qp_params.modify_flags,
1931 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1932 qp_params.retry_cnt = attr->retry_cnt;
1933 }
1934
1935 if (attr_mask & IB_QP_RNR_RETRY) {
1936 SET_FIELD(qp_params.modify_flags,
1937 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1938 qp_params.rnr_retry_cnt = attr->rnr_retry;
1939 }
1940
1941 if (attr_mask & IB_QP_RQ_PSN) {
1942 SET_FIELD(qp_params.modify_flags,
1943 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1944 qp_params.rq_psn = attr->rq_psn;
1945 qp->rq_psn = attr->rq_psn;
1946 }
1947
1948 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1949 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1950 rc = -EINVAL;
1951 DP_ERR(dev,
1952 "unsupported max_rd_atomic=%d, supported=%d\n",
1953 attr->max_rd_atomic,
1954 dev->attr.max_qp_req_rd_atomic_resc);
1955 goto err;
1956 }
1957
1958 SET_FIELD(qp_params.modify_flags,
1959 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1960 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1961 }
1962
1963 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1964 SET_FIELD(qp_params.modify_flags,
1965 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1966 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1967 }
1968
1969 if (attr_mask & IB_QP_SQ_PSN) {
1970 SET_FIELD(qp_params.modify_flags,
1971 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1972 qp_params.sq_psn = attr->sq_psn;
1973 qp->sq_psn = attr->sq_psn;
1974 }
1975
1976 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1977 if (attr->max_dest_rd_atomic >
1978 dev->attr.max_qp_resp_rd_atomic_resc) {
1979 DP_ERR(dev,
1980 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1981 attr->max_dest_rd_atomic,
1982 dev->attr.max_qp_resp_rd_atomic_resc);
1983
1984 rc = -EINVAL;
1985 goto err;
1986 }
1987
1988 SET_FIELD(qp_params.modify_flags,
1989 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1990 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1991 }
1992
1993 if (attr_mask & IB_QP_DEST_QPN) {
1994 SET_FIELD(qp_params.modify_flags,
1995 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1996
1997 qp_params.dest_qp = attr->dest_qp_num;
1998 qp->dest_qp_num = attr->dest_qp_num;
1999 }
2000
2001 if (qp->qp_type != IB_QPT_GSI)
2002 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2003 qp->qed_qp, &qp_params);
2004
2005 if (attr_mask & IB_QP_STATE) {
2006 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002007 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002008 qp->state = qp_params.new_state;
2009 }
2010
2011err:
2012 return rc;
2013}
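
/* A minimal usage sketch (illustrative only, not part of the driver) of how
 * a consumer typically drives qedr_modify_qp() via the core verbs for an RC
 * QP, assuming the standard RESET->INIT->RTR->RTS attribute masks; the
 * numeric values are hypothetical:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT, .port_num = 1,
 *				   .pkey_index = 0,
 *				   .qp_access_flags = IB_ACCESS_REMOTE_READ |
 *						      IB_ACCESS_REMOTE_WRITE };
 *	rc = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT |
 *			  IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS);
 *
 * followed by similar calls for IB_QPS_RTR and IB_QPS_RTS.
 */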
2014
2015static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2016{
2017 int ib_qp_acc_flags = 0;
2018
2019 if (params->incoming_rdma_write_en)
2020 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2021 if (params->incoming_rdma_read_en)
2022 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2023 if (params->incoming_atomic_en)
2024 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2025 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2026 return ib_qp_acc_flags;
2027}
2028
2029int qedr_query_qp(struct ib_qp *ibqp,
2030 struct ib_qp_attr *qp_attr,
2031 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2032{
2033 struct qed_rdma_query_qp_out_params params;
2034 struct qedr_qp *qp = get_qedr_qp(ibqp);
2035 struct qedr_dev *dev = qp->dev;
2036 int rc = 0;
2037
2038 memset(&params, 0, sizeof(params));
2039
2040 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2041 if (rc)
2042 goto err;
2043
2044 memset(qp_attr, 0, sizeof(*qp_attr));
2045 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2046
2047 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2048 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002049 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002050 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2051 qp_attr->rq_psn = params.rq_psn;
2052 qp_attr->sq_psn = params.sq_psn;
2053 qp_attr->dest_qp_num = params.dest_qp;
2054
2055 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2056
2057 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2058 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2059 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2060 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002061 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002062 qp_init_attr->cap = qp_attr->cap;
2063
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002064 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002065 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2066 params.flow_label, qp->sgid_idx,
2067 params.hop_limit_ttl, params.traffic_class_tos);
2068 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2069 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2070 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002071 qp_attr->timeout = params.timeout;
2072 qp_attr->rnr_retry = params.rnr_retry;
2073 qp_attr->retry_cnt = params.retry_cnt;
2074 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2075 qp_attr->pkey_index = params.pkey_index;
2076 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002077 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2078 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002079 qp_attr->alt_pkey_index = 0;
2080 qp_attr->alt_port_num = 0;
2081 qp_attr->alt_timeout = 0;
2082 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2083
2084 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2085 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2086 qp_attr->max_rd_atomic = params.max_rd_atomic;
2087 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2088
2089 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2090 qp_attr->cap.max_inline_data);
2091
2092err:
2093 return rc;
2094}
2095
Amrani, Ramdf158562016-12-22 14:52:24 +02002096int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2097{
2098 int rc = 0;
2099
2100 if (qp->qp_type != IB_QPT_GSI) {
2101 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2102 if (rc)
2103 return rc;
2104 }
2105
2106 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2107 qedr_cleanup_user(dev, qp);
2108 else
2109 qedr_cleanup_kernel(dev, qp);
2110
2111 return 0;
2112}
2113
Ram Amranicecbcdd2016-10-10 13:15:34 +03002114int qedr_destroy_qp(struct ib_qp *ibqp)
2115{
2116 struct qedr_qp *qp = get_qedr_qp(ibqp);
2117 struct qedr_dev *dev = qp->dev;
2118 struct ib_qp_attr attr;
2119 int attr_mask = 0;
2120 int rc = 0;
2121
2122 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2123 qp, qp->qp_type);
2124
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002125 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2126 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2127 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2128
Ram Amranicecbcdd2016-10-10 13:15:34 +03002129 attr.qp_state = IB_QPS_ERR;
2130 attr_mask |= IB_QP_STATE;
2131
2132 /* Change the QP state to ERROR */
2133 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2134 }
2135
Amrani, Ramdf158562016-12-22 14:52:24 +02002136 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002137 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002138
Amrani, Ramdf158562016-12-22 14:52:24 +02002139 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002140
2141 kfree(qp);
2142
2143 return rc;
2144}
Ram Amranie0290cc2016-10-10 13:15:35 +03002145
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002146struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002147 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002148{
2149 struct qedr_ah *ah;
2150
2151 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2152 if (!ah)
2153 return ERR_PTR(-ENOMEM);
2154
2155 ah->attr = *attr;
2156
2157 return &ah->ibah;
2158}
2159
2160int qedr_destroy_ah(struct ib_ah *ibah)
2161{
2162 struct qedr_ah *ah = get_qedr_ah(ibah);
2163
2164 kfree(ah);
2165 return 0;
2166}
2167
Ram Amranie0290cc2016-10-10 13:15:35 +03002168static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2169{
2170 struct qedr_pbl *pbl, *tmp;
2171
2172 if (info->pbl_table)
2173 list_add_tail(&info->pbl_table->list_entry,
2174 &info->free_pbl_list);
2175
2176 if (!list_empty(&info->inuse_pbl_list))
2177 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2178
2179 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2180 list_del(&pbl->list_entry);
2181 qedr_free_pbl(dev, &info->pbl_info, pbl);
2182 }
2183}
2184
2185static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2186 size_t page_list_len, bool two_layered)
2187{
2188 struct qedr_pbl *tmp;
2189 int rc;
2190
2191 INIT_LIST_HEAD(&info->free_pbl_list);
2192 INIT_LIST_HEAD(&info->inuse_pbl_list);
2193
2194 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2195 page_list_len, two_layered);
2196 if (rc)
2197 goto done;
2198
2199 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002200 if (IS_ERR(info->pbl_table)) {
2201 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002202 goto done;
2203 }
2204
2205 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2206 &info->pbl_table->pa);
2207
2208	/* In the usual case we use 2 PBLs, so we add one to the free
2209	 * list and allocate another one.
2210	 */
2211 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002212 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002213 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2214 goto done;
2215 }
2216
2217 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2218
2219 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2220
2221done:
2222 if (rc)
2223 free_mr_info(dev, info);
2224
2225 return rc;
2226}
2227
2228struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2229 u64 usr_addr, int acc, struct ib_udata *udata)
2230{
2231 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2232 struct qedr_mr *mr;
2233 struct qedr_pd *pd;
2234 int rc = -ENOMEM;
2235
2236 pd = get_qedr_pd(ibpd);
2237 DP_DEBUG(dev, QEDR_MSG_MR,
2238 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2239 pd->pd_id, start, len, usr_addr, acc);
2240
2241 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2242 return ERR_PTR(-EINVAL);
2243
2244 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2245 if (!mr)
2246 return ERR_PTR(rc);
2247
2248 mr->type = QEDR_MR_USER;
2249
2250 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2251 if (IS_ERR(mr->umem)) {
2252 rc = -EFAULT;
2253 goto err0;
2254 }
2255
2256 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2257 if (rc)
2258 goto err1;
2259
2260 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002261 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002262
2263 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2264 if (rc) {
2265 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2266 goto err1;
2267 }
2268
2269 /* Index only, 18 bit long, lkey = itid << 8 | key */
2270 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2271 mr->hw_mr.key = 0;
2272 mr->hw_mr.pd = pd->pd_id;
2273 mr->hw_mr.local_read = 1;
2274 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2275 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2276 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2277 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2278 mr->hw_mr.mw_bind = false;
2279 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2280 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2281 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002282 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002283 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2284 mr->hw_mr.length = len;
2285 mr->hw_mr.vaddr = usr_addr;
2286 mr->hw_mr.zbva = false;
2287 mr->hw_mr.phy_mr = false;
2288 mr->hw_mr.dma_mr = false;
2289
2290 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2291 if (rc) {
2292 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2293 goto err2;
2294 }
2295
2296 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2297 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2298 mr->hw_mr.remote_atomic)
2299 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
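	/* Illustrative example (hypothetical itid): itid 0x1a2b with key 0
	 * gives lkey = rkey = 0x1a2b00, i.e. the itid sits above the 8-bit
	 * key in the low byte.
	 */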
2300
2301 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2302 mr->ibmr.lkey);
2303 return &mr->ibmr;
2304
2305err2:
2306 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2307err1:
2308 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2309err0:
2310 kfree(mr);
2311 return ERR_PTR(rc);
2312}
2313
2314int qedr_dereg_mr(struct ib_mr *ib_mr)
2315{
2316 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2317 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2318 int rc = 0;
2319
2320 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2321 if (rc)
2322 return rc;
2323
2324 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2325
2326 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2327 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2328
2329	/* It could be user-registered memory. */
2330 if (mr->umem)
2331 ib_umem_release(mr->umem);
2332
2333 kfree(mr);
2334
2335 return rc;
2336}
2337
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002338static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2339 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002340{
2341 struct qedr_pd *pd = get_qedr_pd(ibpd);
2342 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2343 struct qedr_mr *mr;
2344 int rc = -ENOMEM;
2345
2346 DP_DEBUG(dev, QEDR_MSG_MR,
2347 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2348 max_page_list_len);
2349
2350 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2351 if (!mr)
2352 return ERR_PTR(rc);
2353
2354 mr->dev = dev;
2355 mr->type = QEDR_MR_FRMR;
2356
2357 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2358 if (rc)
2359 goto err0;
2360
2361 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2362 if (rc) {
2363 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2364 goto err0;
2365 }
2366
2367 /* Index only, 18 bit long, lkey = itid << 8 | key */
2368 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2369 mr->hw_mr.key = 0;
2370 mr->hw_mr.pd = pd->pd_id;
2371 mr->hw_mr.local_read = 1;
2372 mr->hw_mr.local_write = 0;
2373 mr->hw_mr.remote_read = 0;
2374 mr->hw_mr.remote_write = 0;
2375 mr->hw_mr.remote_atomic = 0;
2376 mr->hw_mr.mw_bind = false;
2377 mr->hw_mr.pbl_ptr = 0;
2378 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2379 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2380 mr->hw_mr.fbo = 0;
2381 mr->hw_mr.length = 0;
2382 mr->hw_mr.vaddr = 0;
2383 mr->hw_mr.zbva = false;
2384 mr->hw_mr.phy_mr = true;
2385 mr->hw_mr.dma_mr = false;
2386
2387 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2388 if (rc) {
2389 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2390 goto err1;
2391 }
2392
2393 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2394 mr->ibmr.rkey = mr->ibmr.lkey;
2395
2396 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2397 return mr;
2398
2399err1:
2400 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2401err0:
2402 kfree(mr);
2403 return ERR_PTR(rc);
2404}
2405
2406struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2407 enum ib_mr_type mr_type, u32 max_num_sg)
2408{
2409 struct qedr_dev *dev;
2410 struct qedr_mr *mr;
2411
2412 if (mr_type != IB_MR_TYPE_MEM_REG)
2413 return ERR_PTR(-EINVAL);
2414
2415 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2416
2417 if (IS_ERR(mr))
2418 return ERR_PTR(-EINVAL);
2419
2420 dev = mr->dev;
2421
2422 return &mr->ibmr;
2423}
2424
2425static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2426{
2427 struct qedr_mr *mr = get_qedr_mr(ibmr);
2428 struct qedr_pbl *pbl_table;
2429 struct regpair *pbe;
2430 u32 pbes_in_page;
2431
2432 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2433		DP_ERR(mr->dev, "qedr_set_page fails when npages=%d\n", mr->npages);
2434 return -ENOMEM;
2435 }
2436
2437 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2438 mr->npages, addr);
2439
2440 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2441 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2442 pbe = (struct regpair *)pbl_table->va;
2443 pbe += mr->npages % pbes_in_page;
2444 pbe->lo = cpu_to_le32((u32)addr);
2445 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2446
2447 mr->npages++;
2448
2449 return 0;
2450}
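
/* Sketch of the arithmetic above, assuming a 4 KB PBL page: pbes_in_page is
 * 4096 / 8 = 512, so npages 0..511 land in pbl_table[0], 512..1023 in
 * pbl_table[1], and so on, each PBE written as little-endian lo/hi halves.
 */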
2451
2452static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2453{
2454 int work = info->completed - info->completed_handled - 1;
2455
2456 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2457 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2458 struct qedr_pbl *pbl;
2459
2460		/* Free all the page lists that can be freed
2461		 * (all the ones that were invalidated), under the assumption
2462		 * that if an FMR completed successfully, any invalidate
2463		 * operation posted before it has completed as well.
2464		 */
2465 pbl = list_first_entry(&info->inuse_pbl_list,
2466 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002467 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002468 info->completed_handled++;
2469 }
2470}
2471
2472int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2473 int sg_nents, unsigned int *sg_offset)
2474{
2475 struct qedr_mr *mr = get_qedr_mr(ibmr);
2476
2477 mr->npages = 0;
2478
2479 handle_completed_mrs(mr->dev, &mr->info);
2480 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2481}
2482
2483struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2484{
2485 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2486 struct qedr_pd *pd = get_qedr_pd(ibpd);
2487 struct qedr_mr *mr;
2488 int rc;
2489
2490 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2491 if (!mr)
2492 return ERR_PTR(-ENOMEM);
2493
2494 mr->type = QEDR_MR_DMA;
2495
2496 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2497 if (rc) {
2498 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2499 goto err1;
2500 }
2501
2502 /* index only, 18 bit long, lkey = itid << 8 | key */
2503 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2504 mr->hw_mr.pd = pd->pd_id;
2505 mr->hw_mr.local_read = 1;
2506 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2507 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2508 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2509 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2510 mr->hw_mr.dma_mr = true;
2511
2512 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2513 if (rc) {
2514 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2515 goto err2;
2516 }
2517
2518 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2519 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2520 mr->hw_mr.remote_atomic)
2521 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2522
2523 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2524 return &mr->ibmr;
2525
2526err2:
2527 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2528err1:
2529 kfree(mr);
2530 return ERR_PTR(rc);
2531}
Ram Amraniafa0e132016-10-10 13:15:36 +03002532
2533static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2534{
2535 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2536}
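
/* Note: the ring is reported full when advancing prod by one would collide
 * with cons, so one slot is deliberately left unused to distinguish a full
 * queue from an empty one.
 */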
2537
2538static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2539{
2540 int i, len = 0;
2541
2542 for (i = 0; i < num_sge; i++)
2543 len += sg_list[i].length;
2544
2545 return len;
2546}
2547
2548static void swap_wqe_data64(u64 *p)
2549{
2550 int i;
2551
2552 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2553 *p = cpu_to_be64(cpu_to_le64(*p));
2554}
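
/* The nested cpu_to_be64(cpu_to_le64()) above reduces to an unconditional
 * 64-bit byte swap on both little- and big-endian hosts (one conversion is a
 * no-op while the other swaps), which appears to be the layout the FW expects
 * for inline payload words.
 */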
2555
2556static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2557 struct qedr_qp *qp, u8 *wqe_size,
2558 struct ib_send_wr *wr,
2559 struct ib_send_wr **bad_wr, u8 *bits,
2560 u8 bit)
2561{
2562 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2563 char *seg_prt, *wqe;
2564 int i, seg_siz;
2565
2566 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2567 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2568 *bad_wr = wr;
2569 return 0;
2570 }
2571
2572 if (!data_size)
2573 return data_size;
2574
2575 *bits |= bit;
2576
2577 seg_prt = NULL;
2578 wqe = NULL;
2579 seg_siz = 0;
2580
2581 /* Copy data inline */
2582 for (i = 0; i < wr->num_sge; i++) {
2583 u32 len = wr->sg_list[i].length;
2584 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2585
2586 while (len > 0) {
2587 u32 cur;
2588
2589 /* New segment required */
2590 if (!seg_siz) {
2591 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2592 seg_prt = wqe;
2593 seg_siz = sizeof(struct rdma_sq_common_wqe);
2594 (*wqe_size)++;
2595 }
2596
2597 /* Calculate currently allowed length */
2598 cur = min_t(u32, len, seg_siz);
2599 memcpy(seg_prt, src, cur);
2600
2601 /* Update segment variables */
2602 seg_prt += cur;
2603 seg_siz -= cur;
2604
2605 /* Update sge variables */
2606 src += cur;
2607 len -= cur;
2608
2609 /* Swap fully-completed segments */
2610 if (!seg_siz)
2611 swap_wqe_data64((u64 *)wqe);
2612 }
2613 }
2614
2615 /* swap last not completed segment */
2616 if (seg_siz)
2617 swap_wqe_data64((u64 *)wqe);
2618
2619 return data_size;
2620}
2621
2622#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2623 do { \
2624 DMA_REGPAIR_LE(sge->addr, vaddr); \
2625 (sge)->length = cpu_to_le32(vlength); \
2626 (sge)->flags = cpu_to_le32(vflags); \
2627 } while (0)
2628
2629#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2630 do { \
2631 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2632 (hdr)->num_sges = num_sge; \
2633 } while (0)
2634
2635#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2636 do { \
2637 DMA_REGPAIR_LE(sge->addr, vaddr); \
2638 (sge)->length = cpu_to_le32(vlength); \
2639 (sge)->l_key = cpu_to_le32(vlkey); \
2640 } while (0)
2641
2642static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2643 struct ib_send_wr *wr)
2644{
2645 u32 data_size = 0;
2646 int i;
2647
2648 for (i = 0; i < wr->num_sge; i++) {
2649 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2650
2651 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2652 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2653 sge->length = cpu_to_le32(wr->sg_list[i].length);
2654 data_size += wr->sg_list[i].length;
2655 }
2656
2657 if (wqe_size)
2658 *wqe_size += wr->num_sge;
2659
2660 return data_size;
2661}
2662
2663static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2664 struct qedr_qp *qp,
2665 struct rdma_sq_rdma_wqe_1st *rwqe,
2666 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2667 struct ib_send_wr *wr,
2668 struct ib_send_wr **bad_wr)
2669{
2670 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2671 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2672
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002673 if (wr->send_flags & IB_SEND_INLINE &&
2674 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2675 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002676 u8 flags = 0;
2677
2678 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2679 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2680 bad_wr, &rwqe->flags, flags);
2681 }
2682
2683 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2684}
2685
2686static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2687 struct qedr_qp *qp,
2688 struct rdma_sq_send_wqe_1st *swqe,
2689 struct rdma_sq_send_wqe_2st *swqe2,
2690 struct ib_send_wr *wr,
2691 struct ib_send_wr **bad_wr)
2692{
2693 memset(swqe2, 0, sizeof(*swqe2));
2694 if (wr->send_flags & IB_SEND_INLINE) {
2695 u8 flags = 0;
2696
2697 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2698 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2699 bad_wr, &swqe->flags, flags);
2700 }
2701
2702 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2703}
2704
2705static int qedr_prepare_reg(struct qedr_qp *qp,
2706 struct rdma_sq_fmr_wqe_1st *fwqe1,
2707 struct ib_reg_wr *wr)
2708{
2709 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2710 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2711
2712 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2713 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2714 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2715 fwqe1->l_key = wr->key;
2716
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002717 fwqe2->access_ctrl = 0;
2718
Ram Amraniafa0e132016-10-10 13:15:36 +03002719 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2720 !!(wr->access & IB_ACCESS_REMOTE_READ));
2721 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2722 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2723 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2724 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2725 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2726 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2727 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2728 fwqe2->fmr_ctrl = 0;
2729
2730 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2731 ilog2(mr->ibmr.page_size) - 12);
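	/* E.g. a 4 KB ibmr.page_size yields ilog2(4096) - 12 = 0, so the
	 * field appears to encode the page size relative to 4 KB.
	 */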
2732
2733 fwqe2->length_hi = 0;
2734 fwqe2->length_lo = mr->ibmr.length;
2735 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2736 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2737
2738 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2739
2740 return 0;
2741}
2742
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002743static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002744{
2745 switch (opcode) {
2746 case IB_WR_RDMA_WRITE:
2747 case IB_WR_RDMA_WRITE_WITH_IMM:
2748 return IB_WC_RDMA_WRITE;
2749 case IB_WR_SEND_WITH_IMM:
2750 case IB_WR_SEND:
2751 case IB_WR_SEND_WITH_INV:
2752 return IB_WC_SEND;
2753 case IB_WR_RDMA_READ:
2754 return IB_WC_RDMA_READ;
2755 case IB_WR_ATOMIC_CMP_AND_SWP:
2756 return IB_WC_COMP_SWAP;
2757 case IB_WR_ATOMIC_FETCH_AND_ADD:
2758 return IB_WC_FETCH_ADD;
2759 case IB_WR_REG_MR:
2760 return IB_WC_REG_MR;
2761 case IB_WR_LOCAL_INV:
2762 return IB_WC_LOCAL_INV;
2763 default:
2764 return IB_WC_SEND;
2765 }
2766}
2767
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002768static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002769{
2770 int wq_is_full, err_wr, pbl_is_full;
2771 struct qedr_dev *dev = qp->dev;
2772
2773 /* prevent SQ overflow and/or processing of a bad WR */
2774 err_wr = wr->num_sge > qp->sq.max_sges;
2775 wq_is_full = qedr_wq_is_full(&qp->sq);
2776 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2777 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2778 if (wq_is_full || err_wr || pbl_is_full) {
2779 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2780 DP_ERR(dev,
2781 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2782 qp);
2783 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2784 }
2785
2786 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2787 DP_ERR(dev,
2788 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2789 qp);
2790 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2791 }
2792
2793 if (pbl_is_full &&
2794 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2795 DP_ERR(dev,
2796 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2797 qp);
2798 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2799 }
2800 return false;
2801 }
2802 return true;
2803}
2804
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002805static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002806 struct ib_send_wr **bad_wr)
2807{
2808 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2809 struct qedr_qp *qp = get_qedr_qp(ibqp);
2810 struct rdma_sq_atomic_wqe_1st *awqe1;
2811 struct rdma_sq_atomic_wqe_2nd *awqe2;
2812 struct rdma_sq_atomic_wqe_3rd *awqe3;
2813 struct rdma_sq_send_wqe_2st *swqe2;
2814 struct rdma_sq_local_inv_wqe *iwqe;
2815 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2816 struct rdma_sq_send_wqe_1st *swqe;
2817 struct rdma_sq_rdma_wqe_1st *rwqe;
2818 struct rdma_sq_fmr_wqe_1st *fwqe1;
2819 struct rdma_sq_common_wqe *wqe;
2820 u32 length;
2821 int rc = 0;
2822 bool comp;
2823
2824 if (!qedr_can_post_send(qp, wr)) {
2825 *bad_wr = wr;
2826 return -ENOMEM;
2827 }
2828
2829 wqe = qed_chain_produce(&qp->sq.pbl);
2830 qp->wqe_wr_id[qp->sq.prod].signaled =
2831 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2832
2833 wqe->flags = 0;
2834 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2835 !!(wr->send_flags & IB_SEND_SOLICITED));
2836 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2837 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2838 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2839 !!(wr->send_flags & IB_SEND_FENCE));
2840 wqe->prev_wqe_size = qp->prev_wqe_size;
2841
2842 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2843
2844 switch (wr->opcode) {
2845 case IB_WR_SEND_WITH_IMM:
2846 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2847 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2848 swqe->wqe_size = 2;
2849 swqe2 = qed_chain_produce(&qp->sq.pbl);
2850
2851 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2852 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2853 wr, bad_wr);
2854 swqe->length = cpu_to_le32(length);
2855 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2856 qp->prev_wqe_size = swqe->wqe_size;
2857 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2858 break;
2859 case IB_WR_SEND:
2860 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2861 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2862
2863 swqe->wqe_size = 2;
2864 swqe2 = qed_chain_produce(&qp->sq.pbl);
2865 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2866 wr, bad_wr);
2867 swqe->length = cpu_to_le32(length);
2868 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2869 qp->prev_wqe_size = swqe->wqe_size;
2870 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2871 break;
2872 case IB_WR_SEND_WITH_INV:
2873 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2874 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2875 swqe2 = qed_chain_produce(&qp->sq.pbl);
2876 swqe->wqe_size = 2;
2877 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2878 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2879 wr, bad_wr);
2880 swqe->length = cpu_to_le32(length);
2881 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2882 qp->prev_wqe_size = swqe->wqe_size;
2883 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2884 break;
2885
2886 case IB_WR_RDMA_WRITE_WITH_IMM:
2887 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2888 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2889
2890 rwqe->wqe_size = 2;
2891 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2892 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2893 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2894 wr, bad_wr);
2895 rwqe->length = cpu_to_le32(length);
2896 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2897 qp->prev_wqe_size = rwqe->wqe_size;
2898 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2899 break;
2900 case IB_WR_RDMA_WRITE:
2901 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2902 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2903
2904 rwqe->wqe_size = 2;
2905 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2906 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2907 wr, bad_wr);
2908 rwqe->length = cpu_to_le32(length);
2909 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2910 qp->prev_wqe_size = rwqe->wqe_size;
2911 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2912 break;
2913 case IB_WR_RDMA_READ_WITH_INV:
2914 DP_ERR(dev,
2915 "RDMA READ WITH INVALIDATE not supported\n");
2916 *bad_wr = wr;
2917 rc = -EINVAL;
2918 break;
2919
2920 case IB_WR_RDMA_READ:
2921 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2922 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2923
2924 rwqe->wqe_size = 2;
2925 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2926 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2927 wr, bad_wr);
2928 rwqe->length = cpu_to_le32(length);
2929 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2930 qp->prev_wqe_size = rwqe->wqe_size;
2931 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2932 break;
2933
2934 case IB_WR_ATOMIC_CMP_AND_SWP:
2935 case IB_WR_ATOMIC_FETCH_AND_ADD:
2936 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2937 awqe1->wqe_size = 4;
2938
2939 awqe2 = qed_chain_produce(&qp->sq.pbl);
2940 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2941 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2942
2943 awqe3 = qed_chain_produce(&qp->sq.pbl);
2944
2945 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2946 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2947 DMA_REGPAIR_LE(awqe3->swap_data,
2948 atomic_wr(wr)->compare_add);
2949 } else {
2950 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2951 DMA_REGPAIR_LE(awqe3->swap_data,
2952 atomic_wr(wr)->swap);
2953 DMA_REGPAIR_LE(awqe3->cmp_data,
2954 atomic_wr(wr)->compare_add);
2955 }
2956
2957 qedr_prepare_sq_sges(qp, NULL, wr);
2958
2959 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2960 qp->prev_wqe_size = awqe1->wqe_size;
2961 break;
2962
2963 case IB_WR_LOCAL_INV:
2964 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2965 iwqe->wqe_size = 1;
2966
2967 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2968 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2969 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2970 qp->prev_wqe_size = iwqe->wqe_size;
2971 break;
2972 case IB_WR_REG_MR:
2973 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2974 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2975 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2976 fwqe1->wqe_size = 2;
2977
2978 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2979 if (rc) {
2980 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2981 *bad_wr = wr;
2982 break;
2983 }
2984
2985 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2986 qp->prev_wqe_size = fwqe1->wqe_size;
2987 break;
2988 default:
2989 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2990 rc = -EINVAL;
2991 *bad_wr = wr;
2992 break;
2993 }
2994
2995 if (*bad_wr) {
2996 u16 value;
2997
2998 /* Restore prod to its position before
2999 * this WR was processed
3000 */
3001 value = le16_to_cpu(qp->sq.db_data.data.value);
3002 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3003
3004 /* Restore prev_wqe_size */
3005 qp->prev_wqe_size = wqe->prev_wqe_size;
3006 rc = -EINVAL;
3007 DP_ERR(dev, "POST SEND FAILED\n");
3008 }
3009
3010 return rc;
3011}
3012
3013int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3014 struct ib_send_wr **bad_wr)
3015{
3016 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3017 struct qedr_qp *qp = get_qedr_qp(ibqp);
3018 unsigned long flags;
3019 int rc = 0;
3020
3021 *bad_wr = NULL;
3022
Ram Amrani04886772016-10-10 13:15:38 +03003023 if (qp->qp_type == IB_QPT_GSI)
3024 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3025
Ram Amraniafa0e132016-10-10 13:15:36 +03003026 spin_lock_irqsave(&qp->q_lock, flags);
3027
Amrani, Ram922d9a42016-12-22 14:40:38 +02003028 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3029 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3030 (qp->state != QED_ROCE_QP_STATE_SQD)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003031 spin_unlock_irqrestore(&qp->q_lock, flags);
3032 *bad_wr = wr;
3033 DP_DEBUG(dev, QEDR_MSG_CQ,
3034 "QP in wrong state! QP icid=0x%x state %d\n",
3035 qp->icid, qp->state);
3036 return -EINVAL;
3037 }
3038
Ram Amraniafa0e132016-10-10 13:15:36 +03003039 while (wr) {
3040 rc = __qedr_post_send(ibqp, wr, bad_wr);
3041 if (rc)
3042 break;
3043
3044 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3045
3046 qedr_inc_sw_prod(&qp->sq);
3047
3048 qp->sq.db_data.data.value++;
3049
3050 wr = wr->next;
3051 }
3052
3053	/* Trigger doorbell.
3054	 * If the first WR failed, the doorbell is rung in vain. However,
3055	 * this is not harmful (as long as the producer value is unchanged).
3056	 * For performance reasons we avoid checking for this redundant
3057	 * doorbell.
3058	 */
3059 wmb();
3060 writel(qp->sq.db_data.raw, qp->sq.db);
3061
3062 /* Make sure write sticks */
3063 mmiowb();
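	/* The wmb() above orders the WQE writes and the producer update in
	 * host memory before the doorbell MMIO write; mmiowb() keeps that
	 * MMIO write ordered ahead of any doorbell rung by the next CPU
	 * that acquires q_lock.
	 */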
3064
3065 spin_unlock_irqrestore(&qp->q_lock, flags);
3066
3067 return rc;
3068}
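
/* A minimal usage sketch (illustrative only, not part of the driver): a
 * kernel consumer builds a WR and posts it through the core verbs, which
 * dispatch here; dma_addr, len, raddr and rkey are hypothetical:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_rdma_wr wr = {
 *		.wr = { .opcode = IB_WR_RDMA_WRITE, .sg_list = &sge,
 *			.num_sge = 1, .send_flags = IB_SEND_SIGNALED },
 *		.remote_addr = raddr, .rkey = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	rc = ib_post_send(qp, &wr.wr, &bad_wr);
 */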
3069
3070int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3071 struct ib_recv_wr **bad_wr)
3072{
3073 struct qedr_qp *qp = get_qedr_qp(ibqp);
3074 struct qedr_dev *dev = qp->dev;
3075 unsigned long flags;
3076 int status = 0;
3077
Ram Amrani04886772016-10-10 13:15:38 +03003078 if (qp->qp_type == IB_QPT_GSI)
3079 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3080
Ram Amraniafa0e132016-10-10 13:15:36 +03003081 spin_lock_irqsave(&qp->q_lock, flags);
3082
Amrani, Ram922d9a42016-12-22 14:40:38 +02003083 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003084 spin_unlock_irqrestore(&qp->q_lock, flags);
3085 *bad_wr = wr;
3086 return -EINVAL;
3087 }
3088
3089 while (wr) {
3090 int i;
3091
3092 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3093 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3094 wr->num_sge > qp->rq.max_sges) {
3095 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3096 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3097 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3098 qp->rq.max_sges);
3099 status = -ENOMEM;
3100 *bad_wr = wr;
3101 break;
3102 }
3103 for (i = 0; i < wr->num_sge; i++) {
3104 u32 flags = 0;
3105 struct rdma_rq_sge *rqe =
3106 qed_chain_produce(&qp->rq.pbl);
3107
3108			/* The first SGE must include the number
3109			 * of SGEs in the list
3110			 */
3111 if (!i)
3112 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3113 wr->num_sge);
3114
3115 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3116 wr->sg_list[i].lkey);
3117
3118 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3119 wr->sg_list[i].length, flags);
3120 }
3121
3122		/* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
3123		 * so in this case we post one SGE with length zero. This is
3124		 * because an RDMA write with immediate consumes an RQE.
3125		 */
3126 if (!wr->num_sge) {
3127 u32 flags = 0;
3128 struct rdma_rq_sge *rqe =
3129 qed_chain_produce(&qp->rq.pbl);
3130
3131			/* The first SGE must include the number
3132			 * of SGEs in the list
3133			 */
3134 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3135 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3136
3137 RQ_SGE_SET(rqe, 0, 0, flags);
3138 i = 1;
3139 }
3140
3141 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3142 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3143
3144 qedr_inc_sw_prod(&qp->rq);
3145
3146 /* Flush all the writes before signalling doorbell */
3147 wmb();
3148
3149 qp->rq.db_data.data.value++;
3150
3151 writel(qp->rq.db_data.raw, qp->rq.db);
3152
3153 /* Make sure write sticks */
3154 mmiowb();
3155
3156 wr = wr->next;
3157 }
3158
3159 spin_unlock_irqrestore(&qp->q_lock, flags);
3160
3161 return status;
3162}
3163
3164static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3165{
3166 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3167
3168 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3169 cq->pbl_toggle;
3170}
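
/* A CQE is treated as valid when its toggle bit matches the CQ's current
 * pbl_toggle; the toggle flips each time the CQE chain wraps, so entries left
 * over from the previous lap are ignored.
 */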
3171
3172static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3173{
3174 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3175 struct qedr_qp *qp;
3176
3177 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3178 resp_cqe->qp_handle.lo,
3179 u64);
3180 return qp;
3181}
3182
3183static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3184{
3185 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3186
3187 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3188}
3189
3190/* Return latest CQE (needs processing) */
3191static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3192{
3193 return cq->latest_cqe;
3194}
3195
3196/* For FMRs we need to increase the completed-FMR counter used by the FMR
3197 * algorithm to decide whether a PBL can be freed or not.
3198 * We need to do this whether or not the work request was signaled. For
3199 * this purpose we call this function from the condition that checks if a WR
3200 * should be skipped, to make sure we don't miss it (possibly this FMR
3201 * operation was not signaled).
3202 */
3203static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3204{
3205 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3206 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3207}
3208
3209static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3210 struct qedr_cq *cq, int num_entries,
3211 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3212 int force)
3213{
3214 u16 cnt = 0;
3215
3216 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3217 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3218 qedr_chk_if_fmr(qp);
3219 /* skip WC */
3220 goto next_cqe;
3221 }
3222
3223 /* fill WC */
3224 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003225 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003226 wc->wc_flags = 0;
3227 wc->src_qp = qp->id;
3228 wc->qp = &qp->ibqp;
3229
3230 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3231 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3232
3233 switch (wc->opcode) {
3234 case IB_WC_RDMA_WRITE:
3235 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3236 break;
3237 case IB_WC_COMP_SWAP:
3238 case IB_WC_FETCH_ADD:
3239 wc->byte_len = 8;
3240 break;
3241 case IB_WC_REG_MR:
3242 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3243 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003244 case IB_WC_RDMA_READ:
3245 case IB_WC_SEND:
3246 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3247 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003248 default:
3249 break;
3250 }
3251
3252 num_entries--;
3253 wc++;
3254 cnt++;
3255next_cqe:
3256 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3257 qed_chain_consume(&qp->sq.pbl);
3258 qedr_inc_sw_cons(&qp->sq);
3259 }
3260
3261 return cnt;
3262}
3263
3264static int qedr_poll_cq_req(struct qedr_dev *dev,
3265 struct qedr_qp *qp, struct qedr_cq *cq,
3266 int num_entries, struct ib_wc *wc,
3267 struct rdma_cqe_requester *req)
3268{
3269 int cnt = 0;
3270
3271 switch (req->status) {
3272 case RDMA_CQE_REQ_STS_OK:
3273 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3274 IB_WC_SUCCESS, 0);
3275 break;
3276 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003277 if (qp->state != QED_ROCE_QP_STATE_ERR)
3278 DP_ERR(dev,
3279 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3280 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003281 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003282 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003283 break;
3284 default:
3285		/* process all WQEs before the consumer */
3286 qp->state = QED_ROCE_QP_STATE_ERR;
3287 cnt = process_req(dev, qp, cq, num_entries, wc,
3288 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3289 wc += cnt;
3290 /* if we have extra WC fill it with actual error info */
3291 if (cnt < num_entries) {
3292 enum ib_wc_status wc_status;
3293
3294 switch (req->status) {
3295 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3296 DP_ERR(dev,
3297 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3298 cq->icid, qp->icid);
3299 wc_status = IB_WC_BAD_RESP_ERR;
3300 break;
3301 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3302 DP_ERR(dev,
3303 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3304 cq->icid, qp->icid);
3305 wc_status = IB_WC_LOC_LEN_ERR;
3306 break;
3307 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3308 DP_ERR(dev,
3309 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3310 cq->icid, qp->icid);
3311 wc_status = IB_WC_LOC_QP_OP_ERR;
3312 break;
3313 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3314 DP_ERR(dev,
3315 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3316 cq->icid, qp->icid);
3317 wc_status = IB_WC_LOC_PROT_ERR;
3318 break;
3319 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3320 DP_ERR(dev,
3321 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3322 cq->icid, qp->icid);
3323 wc_status = IB_WC_MW_BIND_ERR;
3324 break;
3325 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3326 DP_ERR(dev,
3327 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3328 cq->icid, qp->icid);
3329 wc_status = IB_WC_REM_INV_REQ_ERR;
3330 break;
3331 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3332 DP_ERR(dev,
3333 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3334 cq->icid, qp->icid);
3335 wc_status = IB_WC_REM_ACCESS_ERR;
3336 break;
3337 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3338 DP_ERR(dev,
3339 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3340 cq->icid, qp->icid);
3341 wc_status = IB_WC_REM_OP_ERR;
3342 break;
3343 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3344 DP_ERR(dev,
3345 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3346 cq->icid, qp->icid);
3347 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3348 break;
3349 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3350 DP_ERR(dev,
3351 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3352 cq->icid, qp->icid);
3353 wc_status = IB_WC_RETRY_EXC_ERR;
3354 break;
3355 default:
3356 DP_ERR(dev,
3357 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3358 cq->icid, qp->icid);
3359 wc_status = IB_WC_GENERAL_ERR;
3360 }
3361 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3362 wc_status, 1);
3363 }
3364 }
3365
3366 return cnt;
3367}
3368
Amrani, Ramb6acd712017-04-27 13:35:35 +03003369static inline int qedr_cqe_resp_status_to_ib(u8 status)
3370{
3371 switch (status) {
3372 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3373 return IB_WC_LOC_ACCESS_ERR;
3374 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3375 return IB_WC_LOC_LEN_ERR;
3376 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3377 return IB_WC_LOC_QP_OP_ERR;
3378 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3379 return IB_WC_LOC_PROT_ERR;
3380 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3381 return IB_WC_MW_BIND_ERR;
3382 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3383 return IB_WC_REM_INV_RD_REQ_ERR;
3384 case RDMA_CQE_RESP_STS_OK:
3385 return IB_WC_SUCCESS;
3386 default:
3387 return IB_WC_GENERAL_ERR;
3388 }
3389}
3390
3391static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3392 struct ib_wc *wc)
3393{
3394 wc->status = IB_WC_SUCCESS;
3395 wc->byte_len = le32_to_cpu(resp->length);
3396
3397 if (resp->flags & QEDR_RESP_IMM) {
3398 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3399 wc->wc_flags |= IB_WC_WITH_IMM;
3400
3401 if (resp->flags & QEDR_RESP_RDMA)
3402 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3403
3404 if (resp->flags & QEDR_RESP_INV)
3405 return -EINVAL;
3406
3407 } else if (resp->flags & QEDR_RESP_INV) {
3408 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3409 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3410
3411 if (resp->flags & QEDR_RESP_RDMA)
3412 return -EINVAL;
3413
3414 } else if (resp->flags & QEDR_RESP_RDMA) {
3415 return -EINVAL;
3416 }
3417
3418 return 0;
3419}
3420
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
		if (qedr_set_ok_cqe_resp_wc(resp, wc))
			DP_ERR(dev,
			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
			       cq, cq->icid, resp->flags);

	} else {
		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
		if (wc->status == IB_WC_GENERAL_ERR)
			DP_ERR(dev,
			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
			       cq, cq->icid, resp->status);
	}

	/* Fill the rest of the WC */
	wc->vendor_err = 0;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}

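/* Complete the RQ work request at the current software consumer, release
 * its PBL elements and advance the software consumer index. Always
 * produces exactly one work completion.
 */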
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			    struct qedr_cq *cq, struct ib_wc *wc,
			    struct rdma_cqe_responder *resp)
{
	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

	__process_resp_one(dev, qp, cq, wc, resp, wr_id);

	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
		qed_chain_consume(&qp->rq.pbl);
	qedr_inc_sw_cons(&qp->rq);

	return 1;
}

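/* Generate IB_WC_WR_FLUSH_ERR completions for the outstanding RQ work
 * requests up to the hardware consumer index, bounded by the number of WC
 * entries the caller provided. Returns the number of completions written.
 */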
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}

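/* Consume the responder CQE once the RQ consumer tracked in software has
 * caught up with the consumer index reported in the CQE itself.
 */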
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

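/* Handle a single responder CQE: flushed work requests are expanded into
 * individual IB_WC_WR_FLUSH_ERR completions, everything else produces one
 * regular completion and consumes the CQE.
 */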
static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 le16_to_cpu(resp->rq_cons));
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}

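/* Requester-side counterpart of try_consume_resp_cqe(): consume the CQE
 * once the SQ consumer has caught up with the index reported in the CQE.
 */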
static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

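/* qedr_poll_cq() - poll for work completions on a qedr completion queue.
 *
 * GSI CQs are handled by the dedicated qedr_gsi_poll_cq() path. For all
 * other CQs the latest valid CQE is inspected under cq_lock, dispatched by
 * type (requester vs. responder), and the CQ consumer index and doorbell
 * are updated once polling stops.
 *
 * Illustrative only, not part of the driver: a ULP reaches this function
 * through the core ib_poll_cq() helper. A minimal sketch, assuming a
 * ULP-owned CQ "my_cq" and a hypothetical handle_completion() callback:
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(my_cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 */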
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe = cq->latest_cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return 0;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

	spin_lock_irqsave(&cq->cq_lock, flags);
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;

	if (update)
		/* The doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one.
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}

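/* MAD processing hook: the qedr driver does not implement device-specific
 * MAD handling, so the MAD header is only logged at debug level and
 * IB_MAD_RESULT_SUCCESS is returned without further processing.
 */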
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u8 port_num,
		     const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *mad_hdr,
		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
		 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
		 mad_hdr->class_specific, mad_hdr->class_version,
		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
	return IB_MAD_RESULT_SUCCESS;
}