/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

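/* Copy a response structure to user space, clamping the length to
 * udata->outlen so that a user library built against a smaller response
 * layout is not overflowed.
 */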
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

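/* Translate the Ethernet link speed reported by qed (in Mbps) into the
 * closest InfiniBand speed/width pair exposed through ib_port_attr.
 */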
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr being zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->gid_tbl_len = 1;
		attr->pkey_tbl_len = 1;
	} else {
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

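/* Track the physical ranges (currently just the doorbell window of the
 * user context's DPI) that user space is allowed to mmap(); qedr_mmap()
 * validates incoming requests against this list.
 */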
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		uresp.pd_id = pd_id;

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

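/* Allocate the DMA-coherent pages that make up a PBL (page buffer list).
 * For a two-layer PBL the first page is filled with the physical addresses
 * of the remaining pages, so the firmware only needs a single root pointer.
 */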
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

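/* Compute how many PBL pages are needed for num_pbes page entries and how
 * large each PBL page must be, switching to a two-layer layout when a
 * single page cannot hold all of the entries.
 */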
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 ( points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

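/* Walk the umem scatterlist and write one PBE per firmware-sized page
 * (1 << pg_shift) into the PBL pages, moving on to the next PBL page
 * whenever the current one fills up.
 */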
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

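/* Pin a user-space queue buffer with ib_umem_get() and prepare the PBL that
 * describes it to the firmware. When alloc_and_init is not set (iWARP user
 * QPs) only the PBL table shell is allocated here; the qed core supplies the
 * actual PBL pages later (see qedr_iwarp_populate_user_qp()).
 */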
static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_page_count(q->umem) <<
	    (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	return 0;

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

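/* CQ creation: a user CQ lives in a pinned user buffer that is described to
 * the firmware through a PBL, while a kernel CQ is built on a qed chain and
 * the driver programs the CQ consumer doorbell itself.
 */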
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE,
					  1, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl, NULL);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		goto done;

	iparams.icid = cq->icid;
	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		return rc;

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	if (oparams.num_cq_notif != cq->cnq_notif)
		goto err;

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
done:
	cq->sig = ~cq->sig;

	kfree(cq);

	return 0;

err:
	DP_ERR(dev,
	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);

	return -EINVAL;
}

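/* Resolve the source GID referenced by the AH attributes and fill in the
 * GIDs, VLAN and RoCE version (v1, v2/IPv4 or v2/IPv6) of the firmware
 * modify-QP request.
 */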
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device,
			       rdma_ah_get_port_num(&attr->ah_attr),
			       grh->sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(grh->dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	/* iWARP requires two doorbells per RQ. */
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	} else {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	}

	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

	/* iWARP uses the same cid for rq and sq */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp->sq_icid = qp->icid;
	else
		uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(dev, &uresp, qp);
	qedr_copy_rq_uresp(dev, &uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	atomic_set(&qp->refcnt, 1);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

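/* iWARP keeps every QP in an IDR keyed by its qp_id so that asynchronous
 * events and the connection manager can look it up later; for RoCE these
 * helpers are no-ops.
 */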
static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
{
	int rc;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&dev->idr_lock);

	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);

	spin_unlock_irq(&dev->idr_lock);
	idr_preload_end();

	return rc < 0 ? rc : 0;
}

static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
{
	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return;

	spin_lock_irq(&dev->idr_lock);
	idr_remove(&dev->qpidr, id);
	spin_unlock_irq(&dev->idr_lock);
}

static inline void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_out_params *out_params)
{
	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;

	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
			   &qp->usq.pbl_info, FW_PAGE_SHIFT);

	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;

	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
}

static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
	int rc = -EINVAL;

	ib_ctx = ibpd->uobject->context;
	ctx = get_qedr_ucontext(ib_ctx);

	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		qedr_iwarp_populate_user_qp(dev, qp, &out_params);

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
}

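/* RoCE kernel QP: allocate the SQ/RQ chains first and hand their PBL
 * addresses to qed. The iWARP variant below works the other way around:
 * qed allocates the queues and the chains are then built on top of the
 * returned PBLs via qed_chain_ext_pbl.
 */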
Amrani, Ramdf158562016-12-22 14:52:24 +02001527static int
1528qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1529 struct qedr_qp *qp,
1530 struct qed_rdma_create_qp_in_params *in_params,
1531 u32 n_sq_elems, u32 n_rq_elems)
1532{
1533 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001534 int rc;
1535
Ram Amranicecbcdd2016-10-10 13:15:34 +03001536 rc = dev->ops->common->chain_alloc(dev->cdev,
1537 QED_CHAIN_USE_TO_PRODUCE,
1538 QED_CHAIN_MODE_PBL,
1539 QED_CHAIN_CNT_TYPE_U32,
1540 n_sq_elems,
1541 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001542 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001543
1544 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001545 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001546
Amrani, Ramdf158562016-12-22 14:52:24 +02001547 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1548 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001549
Ram Amranicecbcdd2016-10-10 13:15:34 +03001550 rc = dev->ops->common->chain_alloc(dev->cdev,
1551 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1552 QED_CHAIN_MODE_PBL,
1553 QED_CHAIN_CNT_TYPE_U32,
1554 n_rq_elems,
1555 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001556 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001557 if (rc)
1558 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001559
Amrani, Ramdf158562016-12-22 14:52:24 +02001560 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1561 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001562
Amrani, Ramdf158562016-12-22 14:52:24 +02001563 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1564 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001565
Amrani, Ramdf158562016-12-22 14:52:24 +02001566 if (!qp->qed_qp)
1567 return -EINVAL;
1568
1569 qp->qp_id = out_params.qp_id;
1570 qp->icid = out_params.icid;
1571
1572 qedr_set_roce_db_info(dev, qp);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001573 return rc;
1574}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001575
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001576static int
1577qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1578 struct qedr_qp *qp,
1579 struct qed_rdma_create_qp_in_params *in_params,
1580 u32 n_sq_elems, u32 n_rq_elems)
1581{
1582 struct qed_rdma_create_qp_out_params out_params;
1583 struct qed_chain_ext_pbl ext_pbl;
1584 int rc;
1585
1586 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1587 QEDR_SQE_ELEMENT_SIZE,
1588 QED_CHAIN_MODE_PBL);
1589 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1590 QEDR_RQE_ELEMENT_SIZE,
1591 QED_CHAIN_MODE_PBL);
1592
1593 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1594 in_params, &out_params);
1595
1596 if (!qp->qed_qp)
1597 return -EINVAL;
1598
1599 /* Now we allocate the chain */
1600 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1601 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1602
1603 rc = dev->ops->common->chain_alloc(dev->cdev,
1604 QED_CHAIN_USE_TO_PRODUCE,
1605 QED_CHAIN_MODE_PBL,
1606 QED_CHAIN_CNT_TYPE_U32,
1607 n_sq_elems,
1608 QEDR_SQE_ELEMENT_SIZE,
1609 &qp->sq.pbl, &ext_pbl);
1610
1611 if (rc)
1612 goto err;
1613
1614 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1615 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1616
1617 rc = dev->ops->common->chain_alloc(dev->cdev,
1618 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1619 QED_CHAIN_MODE_PBL,
1620 QED_CHAIN_CNT_TYPE_U32,
1621 n_rq_elems,
1622 QEDR_RQE_ELEMENT_SIZE,
1623 &qp->rq.pbl, &ext_pbl);
1624
1625 if (rc)
1626 goto err;
1627
1628 qp->qp_id = out_params.qp_id;
1629 qp->icid = out_params.icid;
1630
1631 qedr_set_iwarp_db_info(dev, qp);
1632 return rc;
1633
1634err:
1635 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1636
1637 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001638}
1639
Amrani, Ramdf158562016-12-22 14:52:24 +02001640static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001641{
Amrani, Ramdf158562016-12-22 14:52:24 +02001642 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1643 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001644
Amrani, Ramdf158562016-12-22 14:52:24 +02001645 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1646 kfree(qp->rqe_wr_id);
1647}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001648
Amrani, Ramdf158562016-12-22 14:52:24 +02001649static int qedr_create_kernel_qp(struct qedr_dev *dev,
1650 struct qedr_qp *qp,
1651 struct ib_pd *ibpd,
1652 struct ib_qp_init_attr *attrs)
1653{
1654 struct qed_rdma_create_qp_in_params in_params;
1655 struct qedr_pd *pd = get_qedr_pd(ibpd);
1656 int rc = -EINVAL;
1657 u32 n_rq_elems;
1658 u32 n_sq_elems;
1659 u32 n_sq_entries;
1660
1661 memset(&in_params, 0, sizeof(in_params));
1662
1663 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1664 * the ring. The ring should allow at least a single WR, even if the
1665 * user requested none, due to allocation issues.
1666 * We should add an extra WR since the prod and cons indices of
1667 * wqe_wr_id are managed in such a way that the WQ is considered full
1668 * when (prod+1)%max_wr==cons. We currently don't do that because we
1669 * double the number of entries due an iSER issue that pushes far more
1670 * WRs than indicated. If we decline its ib_post_send() then we get
1671 * error prints in the dmesg we'd like to avoid.
1672 */
1673 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1674 dev->attr.max_sqe);
1675
1676 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1677 GFP_KERNEL);
1678 if (!qp->wqe_wr_id) {
1679 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1680 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001681 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001682
Amrani, Ramdf158562016-12-22 14:52:24 +02001683 /* QP handle to be written in CQE */
1684 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1685 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001686
Amrani, Ramdf158562016-12-22 14:52:24 +02001687 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1688	 * the ring. The ring should allow at least a single WR, even if the
1689 * user requested none, due to allocation issues.
1690 */
1691 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1692
1693 /* Allocate driver internal RQ array */
1694 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1695 GFP_KERNEL);
1696 if (!qp->rqe_wr_id) {
1697 DP_ERR(dev,
1698 "create qp: failed RQ shadow memory allocation\n");
1699 kfree(qp->wqe_wr_id);
1700 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001701 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001702
Amrani, Ramdf158562016-12-22 14:52:24 +02001703 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001704
Amrani, Ramdf158562016-12-22 14:52:24 +02001705 n_sq_entries = attrs->cap.max_send_wr;
1706 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1707 n_sq_entries = max_t(u32, n_sq_entries, 1);
1708 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001709
Amrani, Ramdf158562016-12-22 14:52:24 +02001710 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1711
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001712 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1713 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1714 n_sq_elems, n_rq_elems);
1715 else
1716 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1717 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001718 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001719 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001720
1721 return rc;
1722}
1723
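/* Verbs create_qp entry point. GSI QPs are handed to the dedicated GSI
 * code; all other QPs go through either the user-space path (udata
 * present) or the kernel path above, and are then added to the device's
 * QP idr so they can be looked up by qp_id.
 */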
1724struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1725 struct ib_qp_init_attr *attrs,
1726 struct ib_udata *udata)
1727{
1728 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001729 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001730 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001731 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001732 int rc = 0;
1733
1734 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1735 udata ? "user library" : "kernel", pd);
1736
1737 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1738 if (rc)
1739 return ERR_PTR(rc);
1740
Wei Yongjun181d8012016-10-28 16:33:47 +00001741 if (attrs->srq)
1742 return ERR_PTR(-EINVAL);
1743
Ram Amranicecbcdd2016-10-10 13:15:34 +03001744 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001745		 "create qp: called from %s, event_handler=%p, pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1746 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001747 get_qedr_cq(attrs->send_cq),
1748 get_qedr_cq(attrs->send_cq)->icid,
1749 get_qedr_cq(attrs->recv_cq),
1750 get_qedr_cq(attrs->recv_cq)->icid);
1751
Amrani, Ramdf158562016-12-22 14:52:24 +02001752 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1753 if (!qp) {
1754 DP_ERR(dev, "create qp: failed allocating memory\n");
1755 return ERR_PTR(-ENOMEM);
1756 }
1757
1758 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001759
Ram Amrani04886772016-10-10 13:15:38 +03001760 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001761 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1762 if (IS_ERR(ibqp))
1763 kfree(qp);
1764 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001765 }
1766
Amrani, Ramdf158562016-12-22 14:52:24 +02001767 if (udata)
1768 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1769 else
1770 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001771
Amrani, Ramdf158562016-12-22 14:52:24 +02001772 if (rc)
1773 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001774
Ram Amranicecbcdd2016-10-10 13:15:34 +03001775 qp->ibqp.qp_num = qp->qp_id;
1776
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001777 rc = qedr_idr_add(dev, qp, qp->qp_id);
1778 if (rc)
1779 goto err;
1780
Ram Amranicecbcdd2016-10-10 13:15:34 +03001781 return &qp->ibqp;
1782
Amrani, Ramdf158562016-12-22 14:52:24 +02001783err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001784 kfree(qp);
1785
1786 return ERR_PTR(-EFAULT);
1787}
1788
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001789static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001790{
1791 switch (qp_state) {
1792 case QED_ROCE_QP_STATE_RESET:
1793 return IB_QPS_RESET;
1794 case QED_ROCE_QP_STATE_INIT:
1795 return IB_QPS_INIT;
1796 case QED_ROCE_QP_STATE_RTR:
1797 return IB_QPS_RTR;
1798 case QED_ROCE_QP_STATE_RTS:
1799 return IB_QPS_RTS;
1800 case QED_ROCE_QP_STATE_SQD:
1801 return IB_QPS_SQD;
1802 case QED_ROCE_QP_STATE_ERR:
1803 return IB_QPS_ERR;
1804 case QED_ROCE_QP_STATE_SQE:
1805 return IB_QPS_SQE;
1806 }
1807 return IB_QPS_ERR;
1808}
1809
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001810static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1811 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001812{
1813 switch (qp_state) {
1814 case IB_QPS_RESET:
1815 return QED_ROCE_QP_STATE_RESET;
1816 case IB_QPS_INIT:
1817 return QED_ROCE_QP_STATE_INIT;
1818 case IB_QPS_RTR:
1819 return QED_ROCE_QP_STATE_RTR;
1820 case IB_QPS_RTS:
1821 return QED_ROCE_QP_STATE_RTS;
1822 case IB_QPS_SQD:
1823 return QED_ROCE_QP_STATE_SQD;
1824 case IB_QPS_ERR:
1825 return QED_ROCE_QP_STATE_ERR;
1826 default:
1827 return QED_ROCE_QP_STATE_ERR;
1828 }
1829}
1830
1831static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1832{
1833 qed_chain_reset(&qph->pbl);
1834 qph->prod = 0;
1835 qph->cons = 0;
1836 qph->wqe_cons = 0;
1837 qph->db_data.data.value = cpu_to_le16(0);
1838}
1839
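/* Apply the driver-side effects of a QP state transition and reject
 * transitions that are invalid from the current state. RESET->INIT
 * resets the SQ/RQ ring bookkeeping, and INIT->RTR rings the RQ
 * doorbell on RoCE in case receives were posted before the move to RTR.
 */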
1840static int qedr_update_qp_state(struct qedr_dev *dev,
1841 struct qedr_qp *qp,
1842 enum qed_roce_qp_state new_state)
1843{
1844 int status = 0;
1845
1846 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001847 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001848
1849 switch (qp->state) {
1850 case QED_ROCE_QP_STATE_RESET:
1851 switch (new_state) {
1852 case QED_ROCE_QP_STATE_INIT:
1853 qp->prev_wqe_size = 0;
1854 qedr_reset_qp_hwq_info(&qp->sq);
1855 qedr_reset_qp_hwq_info(&qp->rq);
1856 break;
1857 default:
1858 status = -EINVAL;
1859 break;
1860		}
1861 break;
1862 case QED_ROCE_QP_STATE_INIT:
1863 switch (new_state) {
1864 case QED_ROCE_QP_STATE_RTR:
1865 /* Update doorbell (in case post_recv was
1866			 * done before moving to RTR)
1867 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001868
1869 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1870 wmb();
1871 writel(qp->rq.db_data.raw, qp->rq.db);
1872 /* Make sure write takes effect */
1873 mmiowb();
1874 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001875 break;
1876 case QED_ROCE_QP_STATE_ERR:
1877 break;
1878 default:
1879 /* Invalid state change. */
1880 status = -EINVAL;
1881 break;
1882		}
1883 break;
1884 case QED_ROCE_QP_STATE_RTR:
1885 /* RTR->XXX */
1886 switch (new_state) {
1887 case QED_ROCE_QP_STATE_RTS:
1888 break;
1889 case QED_ROCE_QP_STATE_ERR:
1890 break;
1891 default:
1892 /* Invalid state change. */
1893 status = -EINVAL;
1894 break;
1895		}
1896 break;
1897 case QED_ROCE_QP_STATE_RTS:
1898 /* RTS->XXX */
1899 switch (new_state) {
1900 case QED_ROCE_QP_STATE_SQD:
1901 break;
1902 case QED_ROCE_QP_STATE_ERR:
1903 break;
1904 default:
1905 /* Invalid state change. */
1906 status = -EINVAL;
1907 break;
1908		}
1909 break;
1910 case QED_ROCE_QP_STATE_SQD:
1911 /* SQD->XXX */
1912 switch (new_state) {
1913 case QED_ROCE_QP_STATE_RTS:
1914 case QED_ROCE_QP_STATE_ERR:
1915 break;
1916 default:
1917 /* Invalid state change. */
1918 status = -EINVAL;
1919 break;
1920		}
1921 break;
1922 case QED_ROCE_QP_STATE_ERR:
1923 /* ERR->XXX */
1924 switch (new_state) {
1925 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001926 if ((qp->rq.prod != qp->rq.cons) ||
1927 (qp->sq.prod != qp->sq.cons)) {
1928 DP_NOTICE(dev,
1929 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1930 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1931 qp->sq.cons);
1932 status = -EINVAL;
1933 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001934 break;
1935 default:
1936 status = -EINVAL;
1937 break;
1938		}
1939 break;
1940 default:
1941 status = -EINVAL;
1942 break;
1943	}
1944
1945 return status;
1946}
1947
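/* Translate an ib_qp_attr/attr_mask pair into qed modify-QP parameters:
 * state, MTU, address vector, timeouts, retry counts, PSNs and the
 * requester/responder RDMA-read and atomic limits. The qed QP is then
 * modified and, on a state change, the SW state machine is updated.
 */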
1948int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1949 int attr_mask, struct ib_udata *udata)
1950{
1951 struct qedr_qp *qp = get_qedr_qp(ibqp);
1952 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1953 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001954 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001955 enum ib_qp_state old_qp_state, new_qp_state;
1956 int rc = 0;
1957
1958 DP_DEBUG(dev, QEDR_MSG_QP,
1959 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1960 attr->qp_state);
1961
1962 old_qp_state = qedr_get_ibqp_state(qp->state);
1963 if (attr_mask & IB_QP_STATE)
1964 new_qp_state = attr->qp_state;
1965 else
1966 new_qp_state = old_qp_state;
1967
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001968 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1969 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1970 ibqp->qp_type, attr_mask,
1971 IB_LINK_LAYER_ETHERNET)) {
1972 DP_ERR(dev,
1973 "modify qp: invalid attribute mask=0x%x specified for\n"
1974 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1975 attr_mask, qp->qp_id, ibqp->qp_type,
1976 old_qp_state, new_qp_state);
1977 rc = -EINVAL;
1978 goto err;
1979 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001980 }
1981
1982 /* Translate the masks... */
1983 if (attr_mask & IB_QP_STATE) {
1984 SET_FIELD(qp_params.modify_flags,
1985 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1986 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1987 }
1988
1989 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1990 qp_params.sqd_async = true;
1991
1992 if (attr_mask & IB_QP_PKEY_INDEX) {
1993 SET_FIELD(qp_params.modify_flags,
1994 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1995 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1996 rc = -EINVAL;
1997 goto err;
1998 }
1999
2000 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2001 }
2002
2003 if (attr_mask & IB_QP_QKEY)
2004 qp->qkey = attr->qkey;
2005
2006 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2007 SET_FIELD(qp_params.modify_flags,
2008 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2009 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2010 IB_ACCESS_REMOTE_READ;
2011 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2012 IB_ACCESS_REMOTE_WRITE;
2013 qp_params.incoming_atomic_en = attr->qp_access_flags &
2014 IB_ACCESS_REMOTE_ATOMIC;
2015 }
2016
2017 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2018 if (attr_mask & IB_QP_PATH_MTU) {
2019 if (attr->path_mtu < IB_MTU_256 ||
2020 attr->path_mtu > IB_MTU_4096) {
2021 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2022 rc = -EINVAL;
2023 goto err;
2024 }
2025 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2026 ib_mtu_enum_to_int(iboe_get_mtu
2027 (dev->ndev->mtu)));
2028 }
2029
2030 if (!qp->mtu) {
2031 qp->mtu =
2032 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2033 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2034 }
2035
2036 SET_FIELD(qp_params.modify_flags,
2037 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2038
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002039 qp_params.traffic_class_tos = grh->traffic_class;
2040 qp_params.flow_label = grh->flow_label;
2041 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002042
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002043 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002044
2045 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2046 if (rc) {
2047 DP_ERR(dev,
2048 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002049 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002050 return rc;
2051 }
2052
2053 rc = qedr_get_dmac(dev, &attr->ah_attr,
2054 qp_params.remote_mac_addr);
2055 if (rc)
2056 return rc;
2057
2058 qp_params.use_local_mac = true;
2059 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2060
2061 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2062 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2063 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2064 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2065 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2066 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2067 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2068 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002069
2070 qp_params.mtu = qp->mtu;
2071 qp_params.lb_indication = false;
2072 }
2073
2074 if (!qp_params.mtu) {
2075 /* Stay with current MTU */
2076 if (qp->mtu)
2077 qp_params.mtu = qp->mtu;
2078 else
2079 qp_params.mtu =
2080 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2081 }
2082
2083 if (attr_mask & IB_QP_TIMEOUT) {
2084 SET_FIELD(qp_params.modify_flags,
2085 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2086
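		/* IB expresses the ACK timeout as 4.096 usec * 2^timeout,
		 * i.e. 4096 * 2^timeout nanoseconds, while the FW expects
		 * the value in milliseconds, hence the conversion below.
		 */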
2087 qp_params.ack_timeout = attr->timeout;
2088 if (attr->timeout) {
2089 u32 temp;
2090
2091 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2092 /* FW requires [msec] */
2093 qp_params.ack_timeout = temp;
2094 } else {
2095 /* Infinite */
2096 qp_params.ack_timeout = 0;
2097 }
2098 }
2099 if (attr_mask & IB_QP_RETRY_CNT) {
2100 SET_FIELD(qp_params.modify_flags,
2101 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2102 qp_params.retry_cnt = attr->retry_cnt;
2103 }
2104
2105 if (attr_mask & IB_QP_RNR_RETRY) {
2106 SET_FIELD(qp_params.modify_flags,
2107 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2108 qp_params.rnr_retry_cnt = attr->rnr_retry;
2109 }
2110
2111 if (attr_mask & IB_QP_RQ_PSN) {
2112 SET_FIELD(qp_params.modify_flags,
2113 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2114 qp_params.rq_psn = attr->rq_psn;
2115 qp->rq_psn = attr->rq_psn;
2116 }
2117
2118 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2119 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2120 rc = -EINVAL;
2121 DP_ERR(dev,
2122 "unsupported max_rd_atomic=%d, supported=%d\n",
2123 attr->max_rd_atomic,
2124 dev->attr.max_qp_req_rd_atomic_resc);
2125 goto err;
2126 }
2127
2128 SET_FIELD(qp_params.modify_flags,
2129 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2130 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2131 }
2132
2133 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2134 SET_FIELD(qp_params.modify_flags,
2135 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2136 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2137 }
2138
2139 if (attr_mask & IB_QP_SQ_PSN) {
2140 SET_FIELD(qp_params.modify_flags,
2141 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2142 qp_params.sq_psn = attr->sq_psn;
2143 qp->sq_psn = attr->sq_psn;
2144 }
2145
2146 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2147 if (attr->max_dest_rd_atomic >
2148 dev->attr.max_qp_resp_rd_atomic_resc) {
2149 DP_ERR(dev,
2150 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2151 attr->max_dest_rd_atomic,
2152 dev->attr.max_qp_resp_rd_atomic_resc);
2153
2154 rc = -EINVAL;
2155 goto err;
2156 }
2157
2158 SET_FIELD(qp_params.modify_flags,
2159 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2160 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2161 }
2162
2163 if (attr_mask & IB_QP_DEST_QPN) {
2164 SET_FIELD(qp_params.modify_flags,
2165 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2166
2167 qp_params.dest_qp = attr->dest_qp_num;
2168 qp->dest_qp_num = attr->dest_qp_num;
2169 }
2170
2171 if (qp->qp_type != IB_QPT_GSI)
2172 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2173 qp->qed_qp, &qp_params);
2174
2175 if (attr_mask & IB_QP_STATE) {
2176 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002177 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002178 qp->state = qp_params.new_state;
2179 }
2180
2181err:
2182 return rc;
2183}
2184
2185static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2186{
2187 int ib_qp_acc_flags = 0;
2188
2189 if (params->incoming_rdma_write_en)
2190 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2191 if (params->incoming_rdma_read_en)
2192 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2193 if (params->incoming_atomic_en)
2194 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2195 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2196 return ib_qp_acc_flags;
2197}
2198
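/* Query the QP attributes from the qed core and translate them back
 * into the ib_qp_attr/ib_qp_init_attr layout expected by the verbs
 * layer, including the GRH fields of the address handle.
 */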
2199int qedr_query_qp(struct ib_qp *ibqp,
2200 struct ib_qp_attr *qp_attr,
2201 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2202{
2203 struct qed_rdma_query_qp_out_params params;
2204 struct qedr_qp *qp = get_qedr_qp(ibqp);
2205 struct qedr_dev *dev = qp->dev;
2206 int rc = 0;
2207
2208 memset(&params, 0, sizeof(params));
2209
2210 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2211 if (rc)
2212 goto err;
2213
2214 memset(qp_attr, 0, sizeof(*qp_attr));
2215 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2216
2217 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2218 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002219 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002220 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2221 qp_attr->rq_psn = params.rq_psn;
2222 qp_attr->sq_psn = params.sq_psn;
2223 qp_attr->dest_qp_num = params.dest_qp;
2224
2225 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2226
2227 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2228 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2229 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2230 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002231 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002232 qp_init_attr->cap = qp_attr->cap;
2233
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002234 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002235 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2236 params.flow_label, qp->sgid_idx,
2237 params.hop_limit_ttl, params.traffic_class_tos);
2238 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2239 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2240 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002241 qp_attr->timeout = params.timeout;
2242 qp_attr->rnr_retry = params.rnr_retry;
2243 qp_attr->retry_cnt = params.retry_cnt;
2244 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2245 qp_attr->pkey_index = params.pkey_index;
2246 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002247 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2248 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002249 qp_attr->alt_pkey_index = 0;
2250 qp_attr->alt_port_num = 0;
2251 qp_attr->alt_timeout = 0;
2252 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2253
2254 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2255 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2256 qp_attr->max_rd_atomic = params.max_rd_atomic;
2257 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2258
2259 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2260 qp_attr->cap.max_inline_data);
2261
2262err:
2263 return rc;
2264}
2265
Amrani, Ramdf158562016-12-22 14:52:24 +02002266int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2267{
2268 int rc = 0;
2269
2270 if (qp->qp_type != IB_QPT_GSI) {
2271 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2272 if (rc)
2273 return rc;
2274 }
2275
2276 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2277 qedr_cleanup_user(dev, qp);
2278 else
2279 qedr_cleanup_kernel(dev, qp);
2280
2281 return 0;
2282}
2283
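/* Destroy a QP. On RoCE the QP is first moved to the ERROR state if it
 * is still operational; on iWARP we wait (with a bounded retry loop)
 * for an in-progress connect/accept to finish. The QP resources are
 * then freed and the idr reference is dropped.
 */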
Ram Amranicecbcdd2016-10-10 13:15:34 +03002284int qedr_destroy_qp(struct ib_qp *ibqp)
2285{
2286 struct qedr_qp *qp = get_qedr_qp(ibqp);
2287 struct qedr_dev *dev = qp->dev;
2288 struct ib_qp_attr attr;
2289 int attr_mask = 0;
2290 int rc = 0;
2291
2292 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2293 qp, qp->qp_type);
2294
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002295 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2296 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2297 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2298 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002299
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002300 attr.qp_state = IB_QPS_ERR;
2301 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002302
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002303 /* Change the QP state to ERROR */
2304 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2305 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002306 } else {
2307 /* Wait for the connect/accept to complete */
2308 if (qp->ep) {
2309 int wait_count = 1;
2310
2311 while (qp->ep->during_connect) {
2312 DP_DEBUG(dev, QEDR_MSG_QP,
2313 "Still in during connect/accept\n");
2314
2315 msleep(100);
2316 if (wait_count++ > 200) {
2317 DP_NOTICE(dev,
2318 "during connect timeout\n");
2319 break;
2320 }
2321 }
2322 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002323 }
2324
Amrani, Ramdf158562016-12-22 14:52:24 +02002325 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002326 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002327
Amrani, Ramdf158562016-12-22 14:52:24 +02002328 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002329
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002330 if (atomic_dec_and_test(&qp->refcnt)) {
2331 qedr_idr_remove(dev, qp->qp_id);
2332 kfree(qp);
2333 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002334 return rc;
2335}
Ram Amranie0290cc2016-10-10 13:15:35 +03002336
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002337struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002338 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002339{
2340 struct qedr_ah *ah;
2341
2342 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2343 if (!ah)
2344 return ERR_PTR(-ENOMEM);
2345
2346 ah->attr = *attr;
2347
2348 return &ah->ibah;
2349}
2350
2351int qedr_destroy_ah(struct ib_ah *ibah)
2352{
2353 struct qedr_ah *ah = get_qedr_ah(ibah);
2354
2355 kfree(ah);
2356 return 0;
2357}
2358
Ram Amranie0290cc2016-10-10 13:15:35 +03002359static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2360{
2361 struct qedr_pbl *pbl, *tmp;
2362
2363 if (info->pbl_table)
2364 list_add_tail(&info->pbl_table->list_entry,
2365 &info->free_pbl_list);
2366
2367 if (!list_empty(&info->inuse_pbl_list))
2368 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2369
2370 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2371 list_del(&pbl->list_entry);
2372 qedr_free_pbl(dev, &info->pbl_info, pbl);
2373 }
2374}
2375
2376static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2377 size_t page_list_len, bool two_layered)
2378{
2379 struct qedr_pbl *tmp;
2380 int rc;
2381
2382 INIT_LIST_HEAD(&info->free_pbl_list);
2383 INIT_LIST_HEAD(&info->inuse_pbl_list);
2384
2385 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2386 page_list_len, two_layered);
2387 if (rc)
2388 goto done;
2389
2390 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002391 if (IS_ERR(info->pbl_table)) {
2392 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002393 goto done;
2394 }
2395
2396 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2397 &info->pbl_table->pa);
2398
2399	/* In the usual case we use 2 PBLs, so we add one to the free
2400	 * list and allocate another one
2401 */
2402 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002403 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002404 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2405 goto done;
2406 }
2407
2408 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2409
2410 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2411
2412done:
2413 if (rc)
2414 free_mr_info(dev, info);
2415
2416 return rc;
2417}
2418
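/* Register a user memory region: pin the user buffer with ib_umem_get,
 * build a two-layered page buffer list (PBL) from it, allocate a TID
 * and register the MR with the FW using the requested access rights.
 */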
2419struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2420 u64 usr_addr, int acc, struct ib_udata *udata)
2421{
2422 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2423 struct qedr_mr *mr;
2424 struct qedr_pd *pd;
2425 int rc = -ENOMEM;
2426
2427 pd = get_qedr_pd(ibpd);
2428 DP_DEBUG(dev, QEDR_MSG_MR,
2429 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2430 pd->pd_id, start, len, usr_addr, acc);
2431
2432 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2433 return ERR_PTR(-EINVAL);
2434
2435 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2436 if (!mr)
2437 return ERR_PTR(rc);
2438
2439 mr->type = QEDR_MR_USER;
2440
2441 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2442 if (IS_ERR(mr->umem)) {
2443 rc = -EFAULT;
2444 goto err0;
2445 }
2446
2447 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2448 if (rc)
2449 goto err1;
2450
2451 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002452 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002453
2454 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2455 if (rc) {
2456 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2457 goto err1;
2458 }
2459
2460	/* Index only, 18 bits long, lkey = itid << 8 | key */
2461 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2462 mr->hw_mr.key = 0;
2463 mr->hw_mr.pd = pd->pd_id;
2464 mr->hw_mr.local_read = 1;
2465 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2466 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2467 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2468 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2469 mr->hw_mr.mw_bind = false;
2470 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2471 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2472 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002473 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002474 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2475 mr->hw_mr.length = len;
2476 mr->hw_mr.vaddr = usr_addr;
2477 mr->hw_mr.zbva = false;
2478 mr->hw_mr.phy_mr = false;
2479 mr->hw_mr.dma_mr = false;
2480
2481 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2482 if (rc) {
2483 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2484 goto err2;
2485 }
2486
2487 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2488 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2489 mr->hw_mr.remote_atomic)
2490 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2491
2492 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2493 mr->ibmr.lkey);
2494 return &mr->ibmr;
2495
2496err2:
2497 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2498err1:
2499 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2500err0:
2501 kfree(mr);
2502 return ERR_PTR(rc);
2503}
2504
2505int qedr_dereg_mr(struct ib_mr *ib_mr)
2506{
2507 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2508 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2509 int rc = 0;
2510
2511 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2512 if (rc)
2513 return rc;
2514
2515 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2516
2517 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2518 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2519
2520 /* it could be user registered memory. */
2521 if (mr->umem)
2522 ib_umem_release(mr->umem);
2523
2524 kfree(mr);
2525
2526 return rc;
2527}
2528
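/* Allocate a fast-registration (FRMR) MR: only the PBL skeleton and the
 * TID are set up here; the page addresses are filled in later through
 * qedr_map_mr_sg()/qedr_set_page() when the MR is used in a REG_MR WR.
 */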
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002529static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2530 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002531{
2532 struct qedr_pd *pd = get_qedr_pd(ibpd);
2533 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2534 struct qedr_mr *mr;
2535 int rc = -ENOMEM;
2536
2537 DP_DEBUG(dev, QEDR_MSG_MR,
2538 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2539 max_page_list_len);
2540
2541 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2542 if (!mr)
2543 return ERR_PTR(rc);
2544
2545 mr->dev = dev;
2546 mr->type = QEDR_MR_FRMR;
2547
2548 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2549 if (rc)
2550 goto err0;
2551
2552 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2553 if (rc) {
2554 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2555 goto err0;
2556 }
2557
2558	/* Index only, 18 bits long, lkey = itid << 8 | key */
2559 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2560 mr->hw_mr.key = 0;
2561 mr->hw_mr.pd = pd->pd_id;
2562 mr->hw_mr.local_read = 1;
2563 mr->hw_mr.local_write = 0;
2564 mr->hw_mr.remote_read = 0;
2565 mr->hw_mr.remote_write = 0;
2566 mr->hw_mr.remote_atomic = 0;
2567 mr->hw_mr.mw_bind = false;
2568 mr->hw_mr.pbl_ptr = 0;
2569 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2570 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2571 mr->hw_mr.fbo = 0;
2572 mr->hw_mr.length = 0;
2573 mr->hw_mr.vaddr = 0;
2574 mr->hw_mr.zbva = false;
2575 mr->hw_mr.phy_mr = true;
2576 mr->hw_mr.dma_mr = false;
2577
2578 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2579 if (rc) {
2580 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2581 goto err1;
2582 }
2583
2584 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2585 mr->ibmr.rkey = mr->ibmr.lkey;
2586
2587 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2588 return mr;
2589
2590err1:
2591 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2592err0:
2593 kfree(mr);
2594 return ERR_PTR(rc);
2595}
2596
2597struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2598 enum ib_mr_type mr_type, u32 max_num_sg)
2599{
2600 struct qedr_dev *dev;
2601 struct qedr_mr *mr;
2602
2603 if (mr_type != IB_MR_TYPE_MEM_REG)
2604 return ERR_PTR(-EINVAL);
2605
2606 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2607
2608 if (IS_ERR(mr))
2609 return ERR_PTR(-EINVAL);
2610
2611 dev = mr->dev;
2612
2613 return &mr->ibmr;
2614}
2615
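/* ib_sg_to_pages() callback: store one page address in the next free
 * page buffer entry (PBE) of the MR's PBL, moving on to the following
 * PBL table once the current one is full.
 */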
2616static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2617{
2618 struct qedr_mr *mr = get_qedr_mr(ibmr);
2619 struct qedr_pbl *pbl_table;
2620 struct regpair *pbe;
2621 u32 pbes_in_page;
2622
2623 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2624		DP_ERR(mr->dev, "qedr_set_page failed: PBL is full (%d pages)\n", mr->npages);
2625 return -ENOMEM;
2626 }
2627
2628 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2629 mr->npages, addr);
2630
2631 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2632 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2633 pbe = (struct regpair *)pbl_table->va;
2634 pbe += mr->npages % pbes_in_page;
2635 pbe->lo = cpu_to_le32((u32)addr);
2636 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2637
2638 mr->npages++;
2639
2640 return 0;
2641}
2642
2643static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2644{
2645 int work = info->completed - info->completed_handled - 1;
2646
2647 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2648 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2649 struct qedr_pbl *pbl;
2650
2651		/* Free all the page lists that can be freed
2652		 * (all the ones that were invalidated), under the assumption
2653		 * that if an FMR completed successfully then any invalidate
2654		 * operation posted before it has also completed
2655 */
2656 pbl = list_first_entry(&info->inuse_pbl_list,
2657 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002658 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002659 info->completed_handled++;
2660 }
2661}
2662
2663int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2664 int sg_nents, unsigned int *sg_offset)
2665{
2666 struct qedr_mr *mr = get_qedr_mr(ibmr);
2667
2668 mr->npages = 0;
2669
2670 handle_completed_mrs(mr->dev, &mr->info);
2671 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2672}
2673
2674struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2675{
2676 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2677 struct qedr_pd *pd = get_qedr_pd(ibpd);
2678 struct qedr_mr *mr;
2679 int rc;
2680
2681 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2682 if (!mr)
2683 return ERR_PTR(-ENOMEM);
2684
2685 mr->type = QEDR_MR_DMA;
2686
2687 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2688 if (rc) {
2689 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2690 goto err1;
2691 }
2692
2693	/* Index only, 18 bits long, lkey = itid << 8 | key */
2694 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2695 mr->hw_mr.pd = pd->pd_id;
2696 mr->hw_mr.local_read = 1;
2697 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2698 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2699 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2700 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2701 mr->hw_mr.dma_mr = true;
2702
2703 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2704 if (rc) {
2705 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2706 goto err2;
2707 }
2708
2709 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2710 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2711 mr->hw_mr.remote_atomic)
2712 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2713
2714 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2715 return &mr->ibmr;
2716
2717err2:
2718 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2719err1:
2720 kfree(mr);
2721 return ERR_PTR(rc);
2722}
Ram Amraniafa0e132016-10-10 13:15:36 +03002723
2724static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2725{
2726 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2727}
2728
2729static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2730{
2731 int i, len = 0;
2732
2733 for (i = 0; i < num_sge; i++)
2734 len += sg_list[i].length;
2735
2736 return len;
2737}
2738
2739static void swap_wqe_data64(u64 *p)
2740{
2741 int i;
2742
2743 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2744 *p = cpu_to_be64(cpu_to_le64(*p));
2745}
2746
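/* Copy the payload of an inline WR directly into the SQ WQE: the SG
 * list data is packed into consecutively produced chain elements, the
 * inline flag bit is set and each fully written segment is byte-swapped
 * into the format the HW expects. Returns the total inline data size,
 * or 0 if the data does not fit in an inline WQE.
 */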
2747static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2748 struct qedr_qp *qp, u8 *wqe_size,
2749 struct ib_send_wr *wr,
2750 struct ib_send_wr **bad_wr, u8 *bits,
2751 u8 bit)
2752{
2753 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2754 char *seg_prt, *wqe;
2755 int i, seg_siz;
2756
2757 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2758 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2759 *bad_wr = wr;
2760 return 0;
2761 }
2762
2763 if (!data_size)
2764 return data_size;
2765
2766 *bits |= bit;
2767
2768 seg_prt = NULL;
2769 wqe = NULL;
2770 seg_siz = 0;
2771
2772 /* Copy data inline */
2773 for (i = 0; i < wr->num_sge; i++) {
2774 u32 len = wr->sg_list[i].length;
2775 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2776
2777 while (len > 0) {
2778 u32 cur;
2779
2780 /* New segment required */
2781 if (!seg_siz) {
2782 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2783 seg_prt = wqe;
2784 seg_siz = sizeof(struct rdma_sq_common_wqe);
2785 (*wqe_size)++;
2786 }
2787
2788 /* Calculate currently allowed length */
2789 cur = min_t(u32, len, seg_siz);
2790 memcpy(seg_prt, src, cur);
2791
2792 /* Update segment variables */
2793 seg_prt += cur;
2794 seg_siz -= cur;
2795
2796 /* Update sge variables */
2797 src += cur;
2798 len -= cur;
2799
2800 /* Swap fully-completed segments */
2801 if (!seg_siz)
2802 swap_wqe_data64((u64 *)wqe);
2803 }
2804 }
2805
2806 /* swap last not completed segment */
2807 if (seg_siz)
2808 swap_wqe_data64((u64 *)wqe);
2809
2810 return data_size;
2811}
2812
2813#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2814 do { \
2815 DMA_REGPAIR_LE(sge->addr, vaddr); \
2816 (sge)->length = cpu_to_le32(vlength); \
2817 (sge)->flags = cpu_to_le32(vflags); \
2818 } while (0)
2819
2820#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2821 do { \
2822 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2823 (hdr)->num_sges = num_sge; \
2824 } while (0)
2825
2826#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2827 do { \
2828 DMA_REGPAIR_LE(sge->addr, vaddr); \
2829 (sge)->length = cpu_to_le32(vlength); \
2830 (sge)->l_key = cpu_to_le32(vlkey); \
2831 } while (0)
2832
2833static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2834 struct ib_send_wr *wr)
2835{
2836 u32 data_size = 0;
2837 int i;
2838
2839 for (i = 0; i < wr->num_sge; i++) {
2840 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2841
2842 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2843 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2844 sge->length = cpu_to_le32(wr->sg_list[i].length);
2845 data_size += wr->sg_list[i].length;
2846 }
2847
2848 if (wqe_size)
2849 *wqe_size += wr->num_sge;
2850
2851 return data_size;
2852}
2853
2854static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2855 struct qedr_qp *qp,
2856 struct rdma_sq_rdma_wqe_1st *rwqe,
2857 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2858 struct ib_send_wr *wr,
2859 struct ib_send_wr **bad_wr)
2860{
2861 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2862 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2863
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002864 if (wr->send_flags & IB_SEND_INLINE &&
2865 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2866 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002867 u8 flags = 0;
2868
2869 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2870 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2871 bad_wr, &rwqe->flags, flags);
2872 }
2873
2874 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2875}
2876
2877static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2878 struct qedr_qp *qp,
2879 struct rdma_sq_send_wqe_1st *swqe,
2880 struct rdma_sq_send_wqe_2st *swqe2,
2881 struct ib_send_wr *wr,
2882 struct ib_send_wr **bad_wr)
2883{
2884 memset(swqe2, 0, sizeof(*swqe2));
2885 if (wr->send_flags & IB_SEND_INLINE) {
2886 u8 flags = 0;
2887
2888 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2889 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2890 bad_wr, &swqe->flags, flags);
2891 }
2892
2893 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2894}
2895
2896static int qedr_prepare_reg(struct qedr_qp *qp,
2897 struct rdma_sq_fmr_wqe_1st *fwqe1,
2898 struct ib_reg_wr *wr)
2899{
2900 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2901 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2902
2903 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2904 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2905 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2906 fwqe1->l_key = wr->key;
2907
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002908 fwqe2->access_ctrl = 0;
2909
Ram Amraniafa0e132016-10-10 13:15:36 +03002910 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2911 !!(wr->access & IB_ACCESS_REMOTE_READ));
2912 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2913 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2914 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2915 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2916 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2917 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2918 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2919 fwqe2->fmr_ctrl = 0;
2920
2921 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2922 ilog2(mr->ibmr.page_size) - 12);
2923
2924 fwqe2->length_hi = 0;
2925 fwqe2->length_lo = mr->ibmr.length;
2926 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2927 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2928
2929 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2930
2931 return 0;
2932}
2933
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002934static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002935{
2936 switch (opcode) {
2937 case IB_WR_RDMA_WRITE:
2938 case IB_WR_RDMA_WRITE_WITH_IMM:
2939 return IB_WC_RDMA_WRITE;
2940 case IB_WR_SEND_WITH_IMM:
2941 case IB_WR_SEND:
2942 case IB_WR_SEND_WITH_INV:
2943 return IB_WC_SEND;
2944 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002945 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002946 return IB_WC_RDMA_READ;
2947 case IB_WR_ATOMIC_CMP_AND_SWP:
2948 return IB_WC_COMP_SWAP;
2949 case IB_WR_ATOMIC_FETCH_AND_ADD:
2950 return IB_WC_FETCH_ADD;
2951 case IB_WR_REG_MR:
2952 return IB_WC_REG_MR;
2953 case IB_WR_LOCAL_INV:
2954 return IB_WC_LOCAL_INV;
2955 default:
2956 return IB_WC_SEND;
2957 }
2958}
2959
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002960static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002961{
2962 int wq_is_full, err_wr, pbl_is_full;
2963 struct qedr_dev *dev = qp->dev;
2964
2965 /* prevent SQ overflow and/or processing of a bad WR */
2966 err_wr = wr->num_sge > qp->sq.max_sges;
2967 wq_is_full = qedr_wq_is_full(&qp->sq);
2968 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2969 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2970 if (wq_is_full || err_wr || pbl_is_full) {
2971 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2972 DP_ERR(dev,
2973 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2974 qp);
2975 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2976 }
2977
2978 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2979 DP_ERR(dev,
2980 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2981 qp);
2982 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2983 }
2984
2985 if (pbl_is_full &&
2986 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2987 DP_ERR(dev,
2988 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2989 qp);
2990 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2991 }
2992 return false;
2993 }
2994 return true;
2995}
2996
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002997static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002998 struct ib_send_wr **bad_wr)
2999{
3000 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3001 struct qedr_qp *qp = get_qedr_qp(ibqp);
3002 struct rdma_sq_atomic_wqe_1st *awqe1;
3003 struct rdma_sq_atomic_wqe_2nd *awqe2;
3004 struct rdma_sq_atomic_wqe_3rd *awqe3;
3005 struct rdma_sq_send_wqe_2st *swqe2;
3006 struct rdma_sq_local_inv_wqe *iwqe;
3007 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3008 struct rdma_sq_send_wqe_1st *swqe;
3009 struct rdma_sq_rdma_wqe_1st *rwqe;
3010 struct rdma_sq_fmr_wqe_1st *fwqe1;
3011 struct rdma_sq_common_wqe *wqe;
3012 u32 length;
3013 int rc = 0;
3014 bool comp;
3015
3016 if (!qedr_can_post_send(qp, wr)) {
3017 *bad_wr = wr;
3018 return -ENOMEM;
3019 }
3020
3021 wqe = qed_chain_produce(&qp->sq.pbl);
3022 qp->wqe_wr_id[qp->sq.prod].signaled =
3023 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3024
3025 wqe->flags = 0;
3026 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3027 !!(wr->send_flags & IB_SEND_SOLICITED));
3028 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3029 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3030 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3031 !!(wr->send_flags & IB_SEND_FENCE));
3032 wqe->prev_wqe_size = qp->prev_wqe_size;
3033
3034 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3035
3036 switch (wr->opcode) {
3037 case IB_WR_SEND_WITH_IMM:
3038 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3039 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3040 swqe->wqe_size = 2;
3041 swqe2 = qed_chain_produce(&qp->sq.pbl);
3042
3043 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
3044 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3045 wr, bad_wr);
3046 swqe->length = cpu_to_le32(length);
3047 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3048 qp->prev_wqe_size = swqe->wqe_size;
3049 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3050 break;
3051 case IB_WR_SEND:
3052 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3053 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3054
3055 swqe->wqe_size = 2;
3056 swqe2 = qed_chain_produce(&qp->sq.pbl);
3057 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3058 wr, bad_wr);
3059 swqe->length = cpu_to_le32(length);
3060 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3061 qp->prev_wqe_size = swqe->wqe_size;
3062 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3063 break;
3064 case IB_WR_SEND_WITH_INV:
3065 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3066 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3067 swqe2 = qed_chain_produce(&qp->sq.pbl);
3068 swqe->wqe_size = 2;
3069 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3070 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3071 wr, bad_wr);
3072 swqe->length = cpu_to_le32(length);
3073 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3074 qp->prev_wqe_size = swqe->wqe_size;
3075 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3076 break;
3077
3078 case IB_WR_RDMA_WRITE_WITH_IMM:
3079 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3080 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3081
3082 rwqe->wqe_size = 2;
3083 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3084 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3085 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3086 wr, bad_wr);
3087 rwqe->length = cpu_to_le32(length);
3088 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3089 qp->prev_wqe_size = rwqe->wqe_size;
3090 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3091 break;
3092 case IB_WR_RDMA_WRITE:
3093 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3094 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3095
3096 rwqe->wqe_size = 2;
3097 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3098 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3099 wr, bad_wr);
3100 rwqe->length = cpu_to_le32(length);
3101 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3102 qp->prev_wqe_size = rwqe->wqe_size;
3103 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3104 break;
3105 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003106 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3107		/* fallthrough: handled identically to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003108
3109 case IB_WR_RDMA_READ:
3110 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3111 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3112
3113 rwqe->wqe_size = 2;
3114 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3115 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3116 wr, bad_wr);
3117 rwqe->length = cpu_to_le32(length);
3118 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3119 qp->prev_wqe_size = rwqe->wqe_size;
3120 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3121 break;
3122
3123 case IB_WR_ATOMIC_CMP_AND_SWP:
3124 case IB_WR_ATOMIC_FETCH_AND_ADD:
3125 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3126 awqe1->wqe_size = 4;
3127
3128 awqe2 = qed_chain_produce(&qp->sq.pbl);
3129 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3130 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3131
3132 awqe3 = qed_chain_produce(&qp->sq.pbl);
3133
3134 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3135 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3136 DMA_REGPAIR_LE(awqe3->swap_data,
3137 atomic_wr(wr)->compare_add);
3138 } else {
3139 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3140 DMA_REGPAIR_LE(awqe3->swap_data,
3141 atomic_wr(wr)->swap);
3142 DMA_REGPAIR_LE(awqe3->cmp_data,
3143 atomic_wr(wr)->compare_add);
3144 }
3145
3146 qedr_prepare_sq_sges(qp, NULL, wr);
3147
3148 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3149 qp->prev_wqe_size = awqe1->wqe_size;
3150 break;
3151
3152 case IB_WR_LOCAL_INV:
3153 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3154 iwqe->wqe_size = 1;
3155
3156 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3157 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3158 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3159 qp->prev_wqe_size = iwqe->wqe_size;
3160 break;
3161 case IB_WR_REG_MR:
3162 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3163 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3164 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3165 fwqe1->wqe_size = 2;
3166
3167 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3168 if (rc) {
3169 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3170 *bad_wr = wr;
3171 break;
3172 }
3173
3174 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3175 qp->prev_wqe_size = fwqe1->wqe_size;
3176 break;
3177 default:
3178 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3179 rc = -EINVAL;
3180 *bad_wr = wr;
3181 break;
3182 }
3183
3184 if (*bad_wr) {
3185 u16 value;
3186
3187 /* Restore prod to its position before
3188 * this WR was processed
3189 */
3190 value = le16_to_cpu(qp->sq.db_data.data.value);
3191 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3192
3193 /* Restore prev_wqe_size */
3194 qp->prev_wqe_size = wqe->prev_wqe_size;
3195 rc = -EINVAL;
3196 DP_ERR(dev, "POST SEND FAILED\n");
3197 }
3198
3199 return rc;
3200}
3201
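/* Post a chain of send WRs. GSI QPs use the dedicated GSI path; for all
 * other QPs the WQEs are built under q_lock and a single SQ doorbell is
 * rung once for the whole chain after the producer value is updated.
 */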
3202int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3203 struct ib_send_wr **bad_wr)
3204{
3205 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3206 struct qedr_qp *qp = get_qedr_qp(ibqp);
3207 unsigned long flags;
3208 int rc = 0;
3209
3210 *bad_wr = NULL;
3211
Ram Amrani04886772016-10-10 13:15:38 +03003212 if (qp->qp_type == IB_QPT_GSI)
3213 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3214
Ram Amraniafa0e132016-10-10 13:15:36 +03003215 spin_lock_irqsave(&qp->q_lock, flags);
3216
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003217 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3218 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3219 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3220 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3221 spin_unlock_irqrestore(&qp->q_lock, flags);
3222 *bad_wr = wr;
3223 DP_DEBUG(dev, QEDR_MSG_CQ,
3224 "QP in wrong state! QP icid=0x%x state %d\n",
3225 qp->icid, qp->state);
3226 return -EINVAL;
3227 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003228 }
3229
Ram Amraniafa0e132016-10-10 13:15:36 +03003230 while (wr) {
3231 rc = __qedr_post_send(ibqp, wr, bad_wr);
3232 if (rc)
3233 break;
3234
3235 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3236
3237 qedr_inc_sw_prod(&qp->sq);
3238
3239 qp->sq.db_data.data.value++;
3240
3241 wr = wr->next;
3242 }
3243
3244 /* Trigger doorbell
3245 * If there was a failure in the first WR then it will be triggered in
3246	 * vain. However this is not harmful (as long as the producer value is
3247 * unchanged). For performance reasons we avoid checking for this
3248 * redundant doorbell.
3249 */
3250 wmb();
3251 writel(qp->sq.db_data.raw, qp->sq.db);
3252
3253 /* Make sure write sticks */
3254 mmiowb();
3255
3256 spin_unlock_irqrestore(&qp->q_lock, flags);
3257
3258 return rc;
3259}
3260
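/* Post a chain of receive WRs. The first SGE of each RQE carries the
 * SGE count; a WR with no SGEs is still posted as a single zero-length
 * SGE so that an RDMA write with immediate data has an RQE to consume.
 * On iWARP a second RQ doorbell is rung as well.
 */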
3261int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3262 struct ib_recv_wr **bad_wr)
3263{
3264 struct qedr_qp *qp = get_qedr_qp(ibqp);
3265 struct qedr_dev *dev = qp->dev;
3266 unsigned long flags;
3267 int status = 0;
3268
Ram Amrani04886772016-10-10 13:15:38 +03003269 if (qp->qp_type == IB_QPT_GSI)
3270 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3271
Ram Amraniafa0e132016-10-10 13:15:36 +03003272 spin_lock_irqsave(&qp->q_lock, flags);
3273
Amrani, Ram922d9a42016-12-22 14:40:38 +02003274 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003275 spin_unlock_irqrestore(&qp->q_lock, flags);
3276 *bad_wr = wr;
3277 return -EINVAL;
3278 }
3279
3280 while (wr) {
3281 int i;
3282
3283 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3284 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3285 wr->num_sge > qp->rq.max_sges) {
3286 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3287 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3288 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3289 qp->rq.max_sges);
3290 status = -ENOMEM;
3291 *bad_wr = wr;
3292 break;
3293 }
3294 for (i = 0; i < wr->num_sge; i++) {
3295 u32 flags = 0;
3296 struct rdma_rq_sge *rqe =
3297 qed_chain_produce(&qp->rq.pbl);
3298
3299 /* First one must include the number
3300 * of SGE in the list
3301 */
3302 if (!i)
3303 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3304 wr->num_sge);
3305
3306 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3307 wr->sg_list[i].lkey);
3308
3309 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3310 wr->sg_list[i].length, flags);
3311 }
3312
3313		/* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
3314		 * so in this case we need to post one SGE with length zero. This
3315		 * is because an RDMA write with immediate data consumes an RQE.
3316 */
3317 if (!wr->num_sge) {
3318 u32 flags = 0;
3319 struct rdma_rq_sge *rqe =
3320 qed_chain_produce(&qp->rq.pbl);
3321
3322 /* First one must include the number
3323 * of SGE in the list
3324 */
3325 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3326 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3327
3328 RQ_SGE_SET(rqe, 0, 0, flags);
3329 i = 1;
3330 }
3331
3332 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3333 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3334
3335 qedr_inc_sw_prod(&qp->rq);
3336
3337 /* Flush all the writes before signalling doorbell */
3338 wmb();
3339
3340 qp->rq.db_data.data.value++;
3341
3342 writel(qp->rq.db_data.raw, qp->rq.db);
3343
3344 /* Make sure write sticks */
3345 mmiowb();
3346
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003347 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3348 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3349 mmiowb(); /* for second doorbell */
3350 }
3351
Ram Amraniafa0e132016-10-10 13:15:36 +03003352 wr = wr->next;
3353 }
3354
3355 spin_unlock_irqrestore(&qp->q_lock, flags);
3356
3357 return status;
3358}
3359
3360static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3361{
3362 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3363
3364 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3365 cq->pbl_toggle;
3366}
3367
3368static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3369{
3370 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3371 struct qedr_qp *qp;
3372
3373 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3374 resp_cqe->qp_handle.lo,
3375 u64);
3376 return qp;
3377}
3378
3379static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3380{
3381 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3382
3383 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3384}
3385
3386/* Return latest CQE (needs processing) */
3387static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3388{
3389 return cq->latest_cqe;
3390}
3391
3392/* For FMR we need to increment the "completed" counter used by the FMR
3393 * algorithm that determines whether a PBL can be freed.
3394 * We must do this whether or not the work request was signaled. For
3395 * this purpose we call this function from the condition that checks if a
3396 * WC should be skipped, to make sure we don't miss it (possibly this FMR
3397 * operation was not signaled).
3398 */
3399static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3400{
3401 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3402 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3403}
3404
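/* Walk the SQ shadow array up to hw_cons and generate work completions
 * for the signaled WRs (or for all of them when 'force' is set, e.g.
 * when flushing), consuming the corresponding SQ chain elements as we
 * go. Returns the number of CQ entries filled.
 */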
3405static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3406 struct qedr_cq *cq, int num_entries,
3407 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3408 int force)
3409{
3410 u16 cnt = 0;
3411
3412 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3413 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3414 qedr_chk_if_fmr(qp);
3415 /* skip WC */
3416 goto next_cqe;
3417 }
3418
3419 /* fill WC */
3420 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003421 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003422 wc->wc_flags = 0;
3423 wc->src_qp = qp->id;
3424 wc->qp = &qp->ibqp;
3425
3426 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3427 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3428
3429 switch (wc->opcode) {
3430 case IB_WC_RDMA_WRITE:
3431 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3432 break;
3433 case IB_WC_COMP_SWAP:
3434 case IB_WC_FETCH_ADD:
3435 wc->byte_len = 8;
3436 break;
3437 case IB_WC_REG_MR:
3438 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3439 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003440 case IB_WC_RDMA_READ:
3441 case IB_WC_SEND:
3442 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3443 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003444 default:
3445 break;
3446 }
3447
3448 num_entries--;
3449 wc++;
3450 cnt++;
3451next_cqe:
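		/* Whether or not a WC was generated for this WR, release the
		 * SQ chain elements it occupied and advance the SW consumer.
		 */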
3452 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3453 qed_chain_consume(&qp->sq.pbl);
3454 qedr_inc_sw_cons(&qp->sq);
3455 }
3456
3457 return cnt;
3458}
3459
3460static int qedr_poll_cq_req(struct qedr_dev *dev,
3461 struct qedr_qp *qp, struct qedr_cq *cq,
3462 int num_entries, struct ib_wc *wc,
3463 struct rdma_cqe_requester *req)
3464{
3465 int cnt = 0;
3466
3467 switch (req->status) {
3468 case RDMA_CQE_REQ_STS_OK:
3469 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3470 IB_WC_SUCCESS, 0);
3471 break;
3472 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003473 if (qp->state != QED_ROCE_QP_STATE_ERR)
3474 DP_ERR(dev,
3475 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3476 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003477 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003478 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003479 break;
3480 default:
3481		/* process all WQEs before the consumer */
3482 qp->state = QED_ROCE_QP_STATE_ERR;
3483 cnt = process_req(dev, qp, cq, num_entries, wc,
3484 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3485 wc += cnt;
3486		/* if we have an extra WC, fill it with the actual error info */
3487 if (cnt < num_entries) {
3488 enum ib_wc_status wc_status;
3489
3490 switch (req->status) {
3491 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3492 DP_ERR(dev,
3493 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3494 cq->icid, qp->icid);
3495 wc_status = IB_WC_BAD_RESP_ERR;
3496 break;
3497 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3498 DP_ERR(dev,
3499 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3500 cq->icid, qp->icid);
3501 wc_status = IB_WC_LOC_LEN_ERR;
3502 break;
3503 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3504 DP_ERR(dev,
3505 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3506 cq->icid, qp->icid);
3507 wc_status = IB_WC_LOC_QP_OP_ERR;
3508 break;
3509 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3510 DP_ERR(dev,
3511 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3512 cq->icid, qp->icid);
3513 wc_status = IB_WC_LOC_PROT_ERR;
3514 break;
3515 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3516 DP_ERR(dev,
3517 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3518 cq->icid, qp->icid);
3519 wc_status = IB_WC_MW_BIND_ERR;
3520 break;
3521 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3522 DP_ERR(dev,
3523 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3524 cq->icid, qp->icid);
3525 wc_status = IB_WC_REM_INV_REQ_ERR;
3526 break;
3527 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3528 DP_ERR(dev,
3529 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3530 cq->icid, qp->icid);
3531 wc_status = IB_WC_REM_ACCESS_ERR;
3532 break;
3533 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3534 DP_ERR(dev,
3535 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3536 cq->icid, qp->icid);
3537 wc_status = IB_WC_REM_OP_ERR;
3538 break;
3539 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3540 DP_ERR(dev,
3541 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3542 cq->icid, qp->icid);
3543 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3544 break;
3545 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3546 DP_ERR(dev,
3547			       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3548 cq->icid, qp->icid);
3549 wc_status = IB_WC_RETRY_EXC_ERR;
3550 break;
3551 default:
3552 DP_ERR(dev,
3553 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3554 cq->icid, qp->icid);
3555 wc_status = IB_WC_GENERAL_ERR;
3556 }
3557 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3558 wc_status, 1);
3559 }
3560 }
3561
3562 return cnt;
3563}
3564
Amrani, Ramb6acd712017-04-27 13:35:35 +03003565static inline int qedr_cqe_resp_status_to_ib(u8 status)
3566{
3567 switch (status) {
3568 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3569 return IB_WC_LOC_ACCESS_ERR;
3570 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3571 return IB_WC_LOC_LEN_ERR;
3572 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3573 return IB_WC_LOC_QP_OP_ERR;
3574 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3575 return IB_WC_LOC_PROT_ERR;
3576 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3577 return IB_WC_MW_BIND_ERR;
3578 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3579 return IB_WC_REM_INV_RD_REQ_ERR;
3580 case RDMA_CQE_RESP_STS_OK:
3581 return IB_WC_SUCCESS;
3582 default:
3583 return IB_WC_GENERAL_ERR;
3584 }
3585}
3586
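/* Translate the responder CQE flags into WC opcode/flags. Based on the
 * checks below: IMM together with RDMA means RDMA WRITE with immediate,
 * IMM alone means SEND with immediate, INV alone means SEND with
 * invalidate; IMM+INV, or RDMA without IMM, are invalid combinations.
 */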
3587static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3588 struct ib_wc *wc)
3589{
3590 wc->status = IB_WC_SUCCESS;
3591 wc->byte_len = le32_to_cpu(resp->length);
3592
3593 if (resp->flags & QEDR_RESP_IMM) {
3594 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3595 wc->wc_flags |= IB_WC_WITH_IMM;
3596
3597 if (resp->flags & QEDR_RESP_RDMA)
3598 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3599
3600 if (resp->flags & QEDR_RESP_INV)
3601 return -EINVAL;
3602
3603 } else if (resp->flags & QEDR_RESP_INV) {
3604 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3605 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3606
3607 if (resp->flags & QEDR_RESP_RDMA)
3608 return -EINVAL;
3609
3610 } else if (resp->flags & QEDR_RESP_RDMA) {
3611 return -EINVAL;
3612 }
3613
3614 return 0;
3615}
3616
Ram Amraniafa0e132016-10-10 13:15:36 +03003617static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3618 struct qedr_cq *cq, struct ib_wc *wc,
3619 struct rdma_cqe_responder *resp, u64 wr_id)
3620{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003621 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003622 wc->opcode = IB_WC_RECV;
3623 wc->wc_flags = 0;
3624
Amrani, Ramb6acd712017-04-27 13:35:35 +03003625 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3626 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3627 DP_ERR(dev,
3628 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3629 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003630
Amrani, Ramb6acd712017-04-27 13:35:35 +03003631 } else {
3632 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3633 if (wc->status == IB_WC_GENERAL_ERR)
3634 DP_ERR(dev,
3635 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3636 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003637 }
3638
Amrani, Ramb6acd712017-04-27 13:35:35 +03003639 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003640 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003641 wc->src_qp = qp->id;
3642 wc->qp = &qp->ibqp;
3643 wc->wr_id = wr_id;
3644}
3645
3646static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3647 struct qedr_cq *cq, struct ib_wc *wc,
3648 struct rdma_cqe_responder *resp)
3649{
3650 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3651
3652 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3653
3654 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3655 qed_chain_consume(&qp->rq.pbl);
3656 qedr_inc_sw_cons(&qp->rq);
3657
3658 return 1;
3659}
3660
3661static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3662 int num_entries, struct ib_wc *wc, u16 hw_cons)
3663{
3664 u16 cnt = 0;
3665
3666 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3667 /* fill WC */
3668 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003669 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003670 wc->wc_flags = 0;
3671 wc->src_qp = qp->id;
3672 wc->byte_len = 0;
3673 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3674 wc->qp = &qp->ibqp;
3675 num_entries--;
3676 wc++;
3677 cnt++;
3678 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3679 qed_chain_consume(&qp->rq.pbl);
3680 qedr_inc_sw_cons(&qp->rq);
3681 }
3682
3683 return cnt;
3684}
3685
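/* A flushed responder CQE covers all RQ WQEs up to rq_cons. Only consume
 * the CQE once the SW consumer has caught up, so any remaining WQEs can
 * still be flushed on a later poll.
 */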
3686static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3687 struct rdma_cqe_responder *resp, int *update)
3688{
3689 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3690 consume_cqe(cq);
3691 *update |= 1;
3692 }
3693}
3694
3695static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3696 struct qedr_cq *cq, int num_entries,
3697 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3698 int *update)
3699{
3700 int cnt;
3701
3702 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3703 cnt = process_resp_flush(qp, cq, num_entries, wc,
3704 resp->rq_cons);
3705 try_consume_resp_cqe(cq, qp, resp, update);
3706 } else {
3707 cnt = process_resp_one(dev, qp, cq, wc, resp);
3708 consume_cqe(cq);
3709 *update |= 1;
3710 }
3711
3712 return cnt;
3713}
3714
3715static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3716 struct rdma_cqe_requester *req, int *update)
3717{
3718 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3719 consume_cqe(cq);
3720 *update |= 1;
3721 }
3722}
3723
3724int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3725{
3726 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3727 struct qedr_cq *cq = get_qedr_cq(ibcq);
3728 union rdma_cqe *cqe = cq->latest_cqe;
3729 u32 old_cons, new_cons;
3730 unsigned long flags;
3731 int update = 0;
3732 int done = 0;
3733
Amrani, Ram4dd72632017-04-27 13:35:34 +03003734 if (cq->destroyed) {
3735 DP_ERR(dev,
3736 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3737 cq, cq->icid);
3738 return 0;
3739 }
3740
Ram Amrani04886772016-10-10 13:15:38 +03003741 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3742 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3743
Ram Amraniafa0e132016-10-10 13:15:36 +03003744 spin_lock_irqsave(&cq->cq_lock, flags);
3745 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3746 while (num_entries && is_valid_cqe(cq, cqe)) {
3747 struct qedr_qp *qp;
3748 int cnt = 0;
3749
3750 /* prevent speculative reads of any field of CQE */
3751 rmb();
3752
3753 qp = cqe_get_qp(cqe);
3754 if (!qp) {
3755 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3756 break;
3757 }
3758
3759 wc->qp = &qp->ibqp;
3760
3761 switch (cqe_get_type(cqe)) {
3762 case RDMA_CQE_TYPE_REQUESTER:
3763 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3764 &cqe->req);
3765 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3766 break;
3767 case RDMA_CQE_TYPE_RESPONDER_RQ:
3768 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3769 &cqe->resp, &update);
3770 break;
3771 case RDMA_CQE_TYPE_INVALID:
3772 default:
3773 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3774 cqe_get_type(cqe));
3775 }
3776 num_entries -= cnt;
3777 wc += cnt;
3778 done += cnt;
3779
3780 cqe = get_cqe(cq);
3781 }
3782 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3783
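	/* Account for however many chain elements were consumed during this
	 * poll when advancing the SW CQ consumer.
	 */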
3784 cq->cq_cons += new_cons - old_cons;
3785
3786 if (update)
3787		/* doorbell notifies about the latest VALID entry,
3788		 * but the chain already points to the next INVALID one
3789 */
3790 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3791
3792 spin_unlock_irqrestore(&cq->cq_lock, flags);
3793 return done;
3794}
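/* Illustrative only (not part of the driver): a consumer typically drains
 * the CQ through the core ib_poll_cq() wrapper after a completion event,
 * for example:
 *
 *	struct ib_wc wc[16];
 *	int n;
 *
 *	do {
 *		n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 *		...process n work completions...
 *	} while (n > 0);
 */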
Ram Amrani993d1b52016-10-10 13:15:39 +03003795
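/* qedr does not implement MAD processing here; the incoming MAD header is
 * only logged for debugging and IB_MAD_RESULT_SUCCESS is returned.
 */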
3796int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3797 u8 port_num,
3798 const struct ib_wc *in_wc,
3799 const struct ib_grh *in_grh,
3800 const struct ib_mad_hdr *mad_hdr,
3801 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3802 size_t *out_mad_size, u16 *out_mad_pkey_index)
3803{
3804 struct qedr_dev *dev = get_qedr_dev(ibdev);
3805
3806 DP_DEBUG(dev, QEDR_MSG_GSI,
3807 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3808 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3809 mad_hdr->class_specific, mad_hdr->class_version,
3810 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3811 return IB_MAD_RESULT_SUCCESS;
3812}