/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

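/* Clamp the response length to udata->outlen so that replies to user space
 * built against an older, smaller response struct do not overflow the
 * caller's buffer; ib_copy_to_udata() then copies only what fits.
 */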
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr being zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->gid_tbl_len = 1;
		attr->pkey_tbl_len = 1;
	} else {
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

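/* User-space doorbell/queue mappings are tracked on a per-ucontext list of
 * (physical address, length) keys; qedr_mmap() later validates an incoming
 * vma against this list before remapping it into user space.
 */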
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 phy_addr, len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

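/* mmap() handler for user contexts: offsets that fall inside the doorbell
 * BAR must not be mapped for read and are remapped write-combined with
 * io_remap_pfn_range(); anything else (e.g. queue chains) is remapped as
 * regular memory.
 */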
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		uresp.pd_id = pd_id;

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

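/* Size the PBL (page buffer list) describing a memory region to the FW.
 * One PBL page of MIN_FW_PBL_PAGE_SIZE (4K) holds 512 u64 PBEs.  When more
 * PBEs are needed than fit on one page and the caller allows it, a two-layer
 * layout is used, where layer 0 points at the data PBL pages.  For example,
 * a 1GB region of 4K pages needs 262144 PBEs, which exactly fits a two-layer
 * table of 4K PBL pages (512 * 512); for larger regions the loop below
 * doubles pbl_size until the squared per-page capacity is sufficient.
 */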
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 ( points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

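/* Walk the umem scatterlist and write one little-endian PBE per firmware
 * page.  When the umem page size is larger than the FW page size (pg_shift),
 * each umem page is split into fw_pg_per_umem_pg consecutive FW-sized PBEs.
 */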
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

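/* The CQ PBL is consumed as a ring; the toggle bit flips each time the
 * consumer wraps past the last CQE, letting the poller distinguish new
 * completions from stale ones.
 */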
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_page_count(q->umem) <<
	    (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	return 0;

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

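/* Ring the CQ consumer doorbell.  wmb() orders the ring/consumer updates
 * before the doorbell write, and mmiowb() keeps the MMIO write ordered with
 * respect to other CPUs before the caller releases its lock.
 */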
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE,
					  1, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl, NULL);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		goto done;

	iparams.icid = cq->icid;
	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		return rc;

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	if (oparams.num_cq_notif != cq->cnq_notif)
		goto err;

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
done:
	cq->sig = ~cq->sig;

	kfree(cq);

	return 0;

err:
	DP_ERR(dev,
	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);

	return -EINVAL;
}

static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device,
			       rdma_ah_get_port_num(&attr->ah_attr),
			       grh->sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(grh->dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	/* iWARP requires two doorbells per RQ. */
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	} else {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	}

	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

	/* iWARP uses the same cid for rq and sq */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp->sq_icid = qp->icid;
	else
		uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(dev, &uresp, qp);
	qedr_copy_rq_uresp(dev, &uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	atomic_set(&qp->refcnt, 1);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

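/* For iWARP the driver keeps a qp-id -> qp pointer idr so that connection
 * management callbacks can look a QP up by its id; RoCE does not need the
 * mapping, so these helpers are no-ops there.
 */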
static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
{
	int rc;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&dev->idr_lock);

	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);

	spin_unlock_irq(&dev->idr_lock);
	idr_preload_end();

	return rc < 0 ? rc : 0;
}

static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
{
	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return;

	spin_lock_irq(&dev->idr_lock);
	idr_remove(&dev->qpidr, id);
	spin_unlock_irq(&dev->idr_lock);
}

static inline void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_out_params *out_params)
{
	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;

	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
			   &qp->usq.pbl_info, FW_PAGE_SHIFT);

	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;

	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
}

static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
	int rc = -EINVAL;

	ib_ctx = ibpd->uobject->context;
	ctx = get_qedr_ucontext(ib_ctx);

	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0, alloc_and_init);
	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		qedr_iwarp_populate_user_qp(dev, qp, &out_params);

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
}

static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	int rc;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, NULL);

	if (rc)
		return rc;

	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, NULL);
	if (rc)
		return rc;

	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_roce_db_info(dev, qp);
	return rc;
}

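/* iWARP kernel QPs reverse the RoCE ordering: qed allocates the QP (and its
 * PBL memory) first, and the SQ/RQ chains are then built on top of that
 * externally provided PBL via qed_chain_ext_pbl.
 */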
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001578static int
1579qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1580 struct qedr_qp *qp,
1581 struct qed_rdma_create_qp_in_params *in_params,
1582 u32 n_sq_elems, u32 n_rq_elems)
1583{
1584 struct qed_rdma_create_qp_out_params out_params;
1585 struct qed_chain_ext_pbl ext_pbl;
1586 int rc;
1587
1588 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1589 QEDR_SQE_ELEMENT_SIZE,
1590 QED_CHAIN_MODE_PBL);
1591 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1592 QEDR_RQE_ELEMENT_SIZE,
1593 QED_CHAIN_MODE_PBL);
1594
1595 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1596 in_params, &out_params);
1597
1598 if (!qp->qed_qp)
1599 return -EINVAL;
1600
1601 /* Now we allocate the chain */
1602 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1603 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1604
1605 rc = dev->ops->common->chain_alloc(dev->cdev,
1606 QED_CHAIN_USE_TO_PRODUCE,
1607 QED_CHAIN_MODE_PBL,
1608 QED_CHAIN_CNT_TYPE_U32,
1609 n_sq_elems,
1610 QEDR_SQE_ELEMENT_SIZE,
1611 &qp->sq.pbl, &ext_pbl);
1612
1613 if (rc)
1614 goto err;
1615
1616 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1617 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1618
1619 rc = dev->ops->common->chain_alloc(dev->cdev,
1620 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1621 QED_CHAIN_MODE_PBL,
1622 QED_CHAIN_CNT_TYPE_U32,
1623 n_rq_elems,
1624 QEDR_RQE_ELEMENT_SIZE,
1625 &qp->rq.pbl, &ext_pbl);
1626
1627 if (rc)
1628 goto err;
1629
1630 qp->qp_id = out_params.qp_id;
1631 qp->icid = out_params.icid;
1632
1633 qedr_set_iwarp_db_info(dev, qp);
1634 return rc;
1635
1636err:
1637 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1638
1639 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001640}
1641
Amrani, Ramdf158562016-12-22 14:52:24 +02001642static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001643{
Amrani, Ramdf158562016-12-22 14:52:24 +02001644 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1645 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001646
Amrani, Ramdf158562016-12-22 14:52:24 +02001647 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1648 kfree(qp->rqe_wr_id);
1649}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001650
Amrani, Ramdf158562016-12-22 14:52:24 +02001651static int qedr_create_kernel_qp(struct qedr_dev *dev,
1652 struct qedr_qp *qp,
1653 struct ib_pd *ibpd,
1654 struct ib_qp_init_attr *attrs)
1655{
1656 struct qed_rdma_create_qp_in_params in_params;
1657 struct qedr_pd *pd = get_qedr_pd(ibpd);
1658 int rc = -EINVAL;
1659 u32 n_rq_elems;
1660 u32 n_sq_elems;
1661 u32 n_sq_entries;
1662
1663 memset(&in_params, 0, sizeof(in_params));
1664
1665 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1666 * the ring. The ring should allow at least a single WR, even if the
1667 * user requested none, due to allocation issues.
1668 * We should add an extra WR since the prod and cons indices of
1669 * wqe_wr_id are managed in such a way that the WQ is considered full
1670 * when (prod+1)%max_wr==cons. We currently don't do that because we
1671 * double the number of entries due an iSER issue that pushes far more
1672 * WRs than indicated. If we decline its ib_post_send() then we get
1673 * error prints in the dmesg we'd like to avoid.
1674 */
1675 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1676 dev->attr.max_sqe);
1677
1678 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1679 GFP_KERNEL);
1680 if (!qp->wqe_wr_id) {
1681 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1682 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001683 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001684
Amrani, Ramdf158562016-12-22 14:52:24 +02001685 /* QP handle to be written in CQE */
1686 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1687 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
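	/* cqe_get_qp() rebuilds this pointer from the CQE's qp_handle
	 * (via HILO_GEN()) when completions are polled.
	 */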
Ram Amranicecbcdd2016-10-10 13:15:34 +03001688
Amrani, Ramdf158562016-12-22 14:52:24 +02001689 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
 1690	 * the ring. The ring should allow at least a single WR, even if the
1691 * user requested none, due to allocation issues.
1692 */
1693 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1694
1695 /* Allocate driver internal RQ array */
1696 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1697 GFP_KERNEL);
1698 if (!qp->rqe_wr_id) {
1699 DP_ERR(dev,
1700 "create qp: failed RQ shadow memory allocation\n");
1701 kfree(qp->wqe_wr_id);
1702 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001703 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001704
Amrani, Ramdf158562016-12-22 14:52:24 +02001705 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001706
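	/* Size the rings for the worst case, where every WR consumes the
	 * maximum number of chain elements.
	 */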
Amrani, Ramdf158562016-12-22 14:52:24 +02001707 n_sq_entries = attrs->cap.max_send_wr;
1708 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1709 n_sq_entries = max_t(u32, n_sq_entries, 1);
1710 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001711
Amrani, Ramdf158562016-12-22 14:52:24 +02001712 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1713
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001714 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1715 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1716 n_sq_elems, n_rq_elems);
1717 else
1718 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1719 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001720 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001721 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001722
1723 return rc;
1724}
1725
1726struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1727 struct ib_qp_init_attr *attrs,
1728 struct ib_udata *udata)
1729{
1730 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001731 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001732 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001733 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001734 int rc = 0;
1735
1736 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1737 udata ? "user library" : "kernel", pd);
1738
1739 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1740 if (rc)
1741 return ERR_PTR(rc);
1742
Wei Yongjun181d8012016-10-28 16:33:47 +00001743 if (attrs->srq)
1744 return ERR_PTR(-EINVAL);
1745
Ram Amranicecbcdd2016-10-10 13:15:34 +03001746 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001747		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1748 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001749 get_qedr_cq(attrs->send_cq),
1750 get_qedr_cq(attrs->send_cq)->icid,
1751 get_qedr_cq(attrs->recv_cq),
1752 get_qedr_cq(attrs->recv_cq)->icid);
1753
Amrani, Ramdf158562016-12-22 14:52:24 +02001754 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1755 if (!qp) {
1756 DP_ERR(dev, "create qp: failed allocating memory\n");
1757 return ERR_PTR(-ENOMEM);
1758 }
1759
1760 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001761
Ram Amrani04886772016-10-10 13:15:38 +03001762 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001763 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1764 if (IS_ERR(ibqp))
1765 kfree(qp);
1766 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001767 }
1768
Amrani, Ramdf158562016-12-22 14:52:24 +02001769 if (udata)
1770 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1771 else
1772 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001773
Amrani, Ramdf158562016-12-22 14:52:24 +02001774 if (rc)
1775 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001776
Ram Amranicecbcdd2016-10-10 13:15:34 +03001777 qp->ibqp.qp_num = qp->qp_id;
1778
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001779 rc = qedr_idr_add(dev, qp, qp->qp_id);
1780 if (rc)
1781 goto err;
1782
Ram Amranicecbcdd2016-10-10 13:15:34 +03001783 return &qp->ibqp;
1784
Amrani, Ramdf158562016-12-22 14:52:24 +02001785err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001786 kfree(qp);
1787
1788 return ERR_PTR(-EFAULT);
1789}
1790
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001791static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001792{
1793 switch (qp_state) {
1794 case QED_ROCE_QP_STATE_RESET:
1795 return IB_QPS_RESET;
1796 case QED_ROCE_QP_STATE_INIT:
1797 return IB_QPS_INIT;
1798 case QED_ROCE_QP_STATE_RTR:
1799 return IB_QPS_RTR;
1800 case QED_ROCE_QP_STATE_RTS:
1801 return IB_QPS_RTS;
1802 case QED_ROCE_QP_STATE_SQD:
1803 return IB_QPS_SQD;
1804 case QED_ROCE_QP_STATE_ERR:
1805 return IB_QPS_ERR;
1806 case QED_ROCE_QP_STATE_SQE:
1807 return IB_QPS_SQE;
1808 }
1809 return IB_QPS_ERR;
1810}
1811
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001812static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1813 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001814{
1815 switch (qp_state) {
1816 case IB_QPS_RESET:
1817 return QED_ROCE_QP_STATE_RESET;
1818 case IB_QPS_INIT:
1819 return QED_ROCE_QP_STATE_INIT;
1820 case IB_QPS_RTR:
1821 return QED_ROCE_QP_STATE_RTR;
1822 case IB_QPS_RTS:
1823 return QED_ROCE_QP_STATE_RTS;
1824 case IB_QPS_SQD:
1825 return QED_ROCE_QP_STATE_SQD;
1826 case IB_QPS_ERR:
1827 return QED_ROCE_QP_STATE_ERR;
1828 default:
1829 return QED_ROCE_QP_STATE_ERR;
1830 }
1831}
1832
1833static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1834{
1835 qed_chain_reset(&qph->pbl);
1836 qph->prod = 0;
1837 qph->cons = 0;
1838 qph->wqe_cons = 0;
1839 qph->db_data.data.value = cpu_to_le16(0);
1840}
1841
1842static int qedr_update_qp_state(struct qedr_dev *dev,
1843 struct qedr_qp *qp,
1844 enum qed_roce_qp_state new_state)
1845{
1846 int status = 0;
1847
1848 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001849 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001850
1851 switch (qp->state) {
1852 case QED_ROCE_QP_STATE_RESET:
1853 switch (new_state) {
1854 case QED_ROCE_QP_STATE_INIT:
1855 qp->prev_wqe_size = 0;
1856 qedr_reset_qp_hwq_info(&qp->sq);
1857 qedr_reset_qp_hwq_info(&qp->rq);
1858 break;
1859 default:
1860 status = -EINVAL;
1861 break;
 1862		}
1863 break;
1864 case QED_ROCE_QP_STATE_INIT:
1865 switch (new_state) {
1866 case QED_ROCE_QP_STATE_RTR:
1867 /* Update doorbell (in case post_recv was
1868 * done before move to RTR)
1869 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001870
1871 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1872 wmb();
1873 writel(qp->rq.db_data.raw, qp->rq.db);
1874 /* Make sure write takes effect */
1875 mmiowb();
1876 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001877 break;
1878 case QED_ROCE_QP_STATE_ERR:
1879 break;
1880 default:
1881 /* Invalid state change. */
1882 status = -EINVAL;
1883 break;
 1884		}
1885 break;
1886 case QED_ROCE_QP_STATE_RTR:
1887 /* RTR->XXX */
1888 switch (new_state) {
1889 case QED_ROCE_QP_STATE_RTS:
1890 break;
1891 case QED_ROCE_QP_STATE_ERR:
1892 break;
1893 default:
1894 /* Invalid state change. */
1895 status = -EINVAL;
1896 break;
 1897		}
1898 break;
1899 case QED_ROCE_QP_STATE_RTS:
1900 /* RTS->XXX */
1901 switch (new_state) {
1902 case QED_ROCE_QP_STATE_SQD:
1903 break;
1904 case QED_ROCE_QP_STATE_ERR:
1905 break;
1906 default:
1907 /* Invalid state change. */
1908 status = -EINVAL;
1909 break;
 1910		}
1911 break;
1912 case QED_ROCE_QP_STATE_SQD:
1913 /* SQD->XXX */
1914 switch (new_state) {
1915 case QED_ROCE_QP_STATE_RTS:
1916 case QED_ROCE_QP_STATE_ERR:
1917 break;
1918 default:
1919 /* Invalid state change. */
1920 status = -EINVAL;
1921 break;
 1922		}
1923 break;
1924 case QED_ROCE_QP_STATE_ERR:
1925 /* ERR->XXX */
1926 switch (new_state) {
1927 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001928 if ((qp->rq.prod != qp->rq.cons) ||
1929 (qp->sq.prod != qp->sq.cons)) {
1930 DP_NOTICE(dev,
1931 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1932 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1933 qp->sq.cons);
1934 status = -EINVAL;
1935 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001936 break;
1937 default:
1938 status = -EINVAL;
1939 break;
 1940		}
1941 break;
1942 default:
1943 status = -EINVAL;
1944 break;
 1945	}
1946
1947 return status;
1948}
1949
1950int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1951 int attr_mask, struct ib_udata *udata)
1952{
1953 struct qedr_qp *qp = get_qedr_qp(ibqp);
1954 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1955 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001956 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001957 enum ib_qp_state old_qp_state, new_qp_state;
1958 int rc = 0;
1959
1960 DP_DEBUG(dev, QEDR_MSG_QP,
1961 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1962 attr->qp_state);
1963
1964 old_qp_state = qedr_get_ibqp_state(qp->state);
1965 if (attr_mask & IB_QP_STATE)
1966 new_qp_state = attr->qp_state;
1967 else
1968 new_qp_state = old_qp_state;
1969
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001970 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1971 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1972 ibqp->qp_type, attr_mask,
1973 IB_LINK_LAYER_ETHERNET)) {
1974 DP_ERR(dev,
1975 "modify qp: invalid attribute mask=0x%x specified for\n"
1976 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1977 attr_mask, qp->qp_id, ibqp->qp_type,
1978 old_qp_state, new_qp_state);
1979 rc = -EINVAL;
1980 goto err;
1981 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001982 }
1983
1984 /* Translate the masks... */
1985 if (attr_mask & IB_QP_STATE) {
1986 SET_FIELD(qp_params.modify_flags,
1987 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1988 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1989 }
1990
1991 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1992 qp_params.sqd_async = true;
1993
1994 if (attr_mask & IB_QP_PKEY_INDEX) {
1995 SET_FIELD(qp_params.modify_flags,
1996 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1997 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1998 rc = -EINVAL;
1999 goto err;
2000 }
2001
2002 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2003 }
2004
2005 if (attr_mask & IB_QP_QKEY)
2006 qp->qkey = attr->qkey;
2007
2008 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2009 SET_FIELD(qp_params.modify_flags,
2010 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2011 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2012 IB_ACCESS_REMOTE_READ;
2013 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2014 IB_ACCESS_REMOTE_WRITE;
2015 qp_params.incoming_atomic_en = attr->qp_access_flags &
2016 IB_ACCESS_REMOTE_ATOMIC;
2017 }
2018
2019 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2020 if (attr_mask & IB_QP_PATH_MTU) {
2021 if (attr->path_mtu < IB_MTU_256 ||
2022 attr->path_mtu > IB_MTU_4096) {
2023 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2024 rc = -EINVAL;
2025 goto err;
2026 }
2027 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2028 ib_mtu_enum_to_int(iboe_get_mtu
2029 (dev->ndev->mtu)));
2030 }
2031
2032 if (!qp->mtu) {
2033 qp->mtu =
2034 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2035 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2036 }
2037
2038 SET_FIELD(qp_params.modify_flags,
2039 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2040
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002041 qp_params.traffic_class_tos = grh->traffic_class;
2042 qp_params.flow_label = grh->flow_label;
2043 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002044
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002045 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002046
2047 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2048 if (rc) {
2049 DP_ERR(dev,
2050 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002051 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002052 return rc;
2053 }
2054
2055 rc = qedr_get_dmac(dev, &attr->ah_attr,
2056 qp_params.remote_mac_addr);
2057 if (rc)
2058 return rc;
2059
2060 qp_params.use_local_mac = true;
2061 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2062
2063 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2064 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2065 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2066 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2067 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2068 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2069 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2070 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002071
2072 qp_params.mtu = qp->mtu;
2073 qp_params.lb_indication = false;
2074 }
2075
2076 if (!qp_params.mtu) {
2077 /* Stay with current MTU */
2078 if (qp->mtu)
2079 qp_params.mtu = qp->mtu;
2080 else
2081 qp_params.mtu =
2082 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2083 }
2084
2085 if (attr_mask & IB_QP_TIMEOUT) {
2086 SET_FIELD(qp_params.modify_flags,
2087 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2088
2089 qp_params.ack_timeout = attr->timeout;
2090 if (attr->timeout) {
2091 u32 temp;
2092
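			/* IB encodes the ack timeout as 4.096 usec * 2^timeout;
			 * convert it to the milliseconds the FW expects:
			 * 4096 ns * 2^timeout / 10^6.
			 */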
2093 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2094 /* FW requires [msec] */
2095 qp_params.ack_timeout = temp;
2096 } else {
2097 /* Infinite */
2098 qp_params.ack_timeout = 0;
2099 }
2100 }
2101 if (attr_mask & IB_QP_RETRY_CNT) {
2102 SET_FIELD(qp_params.modify_flags,
2103 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2104 qp_params.retry_cnt = attr->retry_cnt;
2105 }
2106
2107 if (attr_mask & IB_QP_RNR_RETRY) {
2108 SET_FIELD(qp_params.modify_flags,
2109 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2110 qp_params.rnr_retry_cnt = attr->rnr_retry;
2111 }
2112
2113 if (attr_mask & IB_QP_RQ_PSN) {
2114 SET_FIELD(qp_params.modify_flags,
2115 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2116 qp_params.rq_psn = attr->rq_psn;
2117 qp->rq_psn = attr->rq_psn;
2118 }
2119
2120 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2121 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2122 rc = -EINVAL;
2123 DP_ERR(dev,
2124 "unsupported max_rd_atomic=%d, supported=%d\n",
2125 attr->max_rd_atomic,
2126 dev->attr.max_qp_req_rd_atomic_resc);
2127 goto err;
2128 }
2129
2130 SET_FIELD(qp_params.modify_flags,
2131 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2132 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2133 }
2134
2135 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2136 SET_FIELD(qp_params.modify_flags,
2137 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2138 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2139 }
2140
2141 if (attr_mask & IB_QP_SQ_PSN) {
2142 SET_FIELD(qp_params.modify_flags,
2143 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2144 qp_params.sq_psn = attr->sq_psn;
2145 qp->sq_psn = attr->sq_psn;
2146 }
2147
2148 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2149 if (attr->max_dest_rd_atomic >
2150 dev->attr.max_qp_resp_rd_atomic_resc) {
2151 DP_ERR(dev,
2152 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2153 attr->max_dest_rd_atomic,
2154 dev->attr.max_qp_resp_rd_atomic_resc);
2155
2156 rc = -EINVAL;
2157 goto err;
2158 }
2159
2160 SET_FIELD(qp_params.modify_flags,
2161 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2162 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2163 }
2164
2165 if (attr_mask & IB_QP_DEST_QPN) {
2166 SET_FIELD(qp_params.modify_flags,
2167 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2168
2169 qp_params.dest_qp = attr->dest_qp_num;
2170 qp->dest_qp_num = attr->dest_qp_num;
2171 }
2172
2173 if (qp->qp_type != IB_QPT_GSI)
2174 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2175 qp->qed_qp, &qp_params);
2176
2177 if (attr_mask & IB_QP_STATE) {
2178 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002179 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002180 qp->state = qp_params.new_state;
2181 }
2182
2183err:
2184 return rc;
2185}
2186
2187static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2188{
2189 int ib_qp_acc_flags = 0;
2190
2191 if (params->incoming_rdma_write_en)
2192 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2193 if (params->incoming_rdma_read_en)
2194 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2195 if (params->incoming_atomic_en)
2196 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2197 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2198 return ib_qp_acc_flags;
2199}
2200
2201int qedr_query_qp(struct ib_qp *ibqp,
2202 struct ib_qp_attr *qp_attr,
2203 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2204{
2205 struct qed_rdma_query_qp_out_params params;
2206 struct qedr_qp *qp = get_qedr_qp(ibqp);
2207 struct qedr_dev *dev = qp->dev;
2208 int rc = 0;
2209
2210 memset(&params, 0, sizeof(params));
2211
2212 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2213 if (rc)
2214 goto err;
2215
2216 memset(qp_attr, 0, sizeof(*qp_attr));
2217 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2218
2219 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2220 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002221 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002222 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2223 qp_attr->rq_psn = params.rq_psn;
2224 qp_attr->sq_psn = params.sq_psn;
2225 qp_attr->dest_qp_num = params.dest_qp;
2226
2227 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2228
2229 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2230 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2231 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2232 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002233 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002234 qp_init_attr->cap = qp_attr->cap;
2235
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002236 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002237 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2238 params.flow_label, qp->sgid_idx,
2239 params.hop_limit_ttl, params.traffic_class_tos);
2240 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2241 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2242 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002243 qp_attr->timeout = params.timeout;
2244 qp_attr->rnr_retry = params.rnr_retry;
2245 qp_attr->retry_cnt = params.retry_cnt;
2246 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2247 qp_attr->pkey_index = params.pkey_index;
2248 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002249 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2250 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002251 qp_attr->alt_pkey_index = 0;
2252 qp_attr->alt_port_num = 0;
2253 qp_attr->alt_timeout = 0;
2254 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2255
2256 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2257 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2258 qp_attr->max_rd_atomic = params.max_rd_atomic;
2259 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2260
2261 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2262 qp_attr->cap.max_inline_data);
2263
2264err:
2265 return rc;
2266}
2267
Amrani, Ramdf158562016-12-22 14:52:24 +02002268int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2269{
2270 int rc = 0;
2271
2272 if (qp->qp_type != IB_QPT_GSI) {
2273 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2274 if (rc)
2275 return rc;
2276 }
2277
2278 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2279 qedr_cleanup_user(dev, qp);
2280 else
2281 qedr_cleanup_kernel(dev, qp);
2282
2283 return 0;
2284}
2285
Ram Amranicecbcdd2016-10-10 13:15:34 +03002286int qedr_destroy_qp(struct ib_qp *ibqp)
2287{
2288 struct qedr_qp *qp = get_qedr_qp(ibqp);
2289 struct qedr_dev *dev = qp->dev;
2290 struct ib_qp_attr attr;
2291 int attr_mask = 0;
2292 int rc = 0;
2293
2294 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2295 qp, qp->qp_type);
2296
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002297 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2298 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2299 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2300 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002301
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002302 attr.qp_state = IB_QPS_ERR;
2303 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002304
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002305 /* Change the QP state to ERROR */
2306 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2307 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002308 } else {
2309 /* Wait for the connect/accept to complete */
2310 if (qp->ep) {
2311 int wait_count = 1;
2312
2313 while (qp->ep->during_connect) {
2314 DP_DEBUG(dev, QEDR_MSG_QP,
 2315					 "Connect/accept still in progress\n");
2316
2317 msleep(100);
2318 if (wait_count++ > 200) {
2319 DP_NOTICE(dev,
 2320						  "Timed out waiting for connect/accept\n");
2321 break;
2322 }
2323 }
2324 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002325 }
2326
Amrani, Ramdf158562016-12-22 14:52:24 +02002327 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002328 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002329
Amrani, Ramdf158562016-12-22 14:52:24 +02002330 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002331
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002332 if (atomic_dec_and_test(&qp->refcnt)) {
2333 qedr_idr_remove(dev, qp->qp_id);
2334 kfree(qp);
2335 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002336 return rc;
2337}
Ram Amranie0290cc2016-10-10 13:15:35 +03002338
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002339struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002340 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002341{
2342 struct qedr_ah *ah;
2343
2344 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2345 if (!ah)
2346 return ERR_PTR(-ENOMEM);
2347
2348 ah->attr = *attr;
2349
2350 return &ah->ibah;
2351}
2352
2353int qedr_destroy_ah(struct ib_ah *ibah)
2354{
2355 struct qedr_ah *ah = get_qedr_ah(ibah);
2356
2357 kfree(ah);
2358 return 0;
2359}
2360
Ram Amranie0290cc2016-10-10 13:15:35 +03002361static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2362{
2363 struct qedr_pbl *pbl, *tmp;
2364
2365 if (info->pbl_table)
2366 list_add_tail(&info->pbl_table->list_entry,
2367 &info->free_pbl_list);
2368
2369 if (!list_empty(&info->inuse_pbl_list))
2370 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2371
2372 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2373 list_del(&pbl->list_entry);
2374 qedr_free_pbl(dev, &info->pbl_info, pbl);
2375 }
2376}
2377
2378static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2379 size_t page_list_len, bool two_layered)
2380{
2381 struct qedr_pbl *tmp;
2382 int rc;
2383
2384 INIT_LIST_HEAD(&info->free_pbl_list);
2385 INIT_LIST_HEAD(&info->inuse_pbl_list);
2386
2387 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2388 page_list_len, two_layered);
2389 if (rc)
2390 goto done;
2391
2392 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002393 if (IS_ERR(info->pbl_table)) {
2394 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002395 goto done;
2396 }
2397
2398 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2399 &info->pbl_table->pa);
2400
 2401	/* In the usual case we use 2 PBLs, so we add one to the free
 2402	 * list and allocate another one
2403 */
2404 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002405 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002406 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2407 goto done;
2408 }
2409
2410 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2411
2412 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2413
2414done:
2415 if (rc)
2416 free_mr_info(dev, info);
2417
2418 return rc;
2419}
2420
2421struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2422 u64 usr_addr, int acc, struct ib_udata *udata)
2423{
2424 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2425 struct qedr_mr *mr;
2426 struct qedr_pd *pd;
2427 int rc = -ENOMEM;
2428
2429 pd = get_qedr_pd(ibpd);
2430 DP_DEBUG(dev, QEDR_MSG_MR,
2431 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2432 pd->pd_id, start, len, usr_addr, acc);
2433
2434 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2435 return ERR_PTR(-EINVAL);
2436
2437 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2438 if (!mr)
2439 return ERR_PTR(rc);
2440
2441 mr->type = QEDR_MR_USER;
2442
2443 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2444 if (IS_ERR(mr->umem)) {
2445 rc = -EFAULT;
2446 goto err0;
2447 }
2448
2449 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2450 if (rc)
2451 goto err1;
2452
2453 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002454 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002455
2456 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2457 if (rc) {
2458 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2459 goto err1;
2460 }
2461
2462 /* Index only, 18 bit long, lkey = itid << 8 | key */
2463 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2464 mr->hw_mr.key = 0;
2465 mr->hw_mr.pd = pd->pd_id;
2466 mr->hw_mr.local_read = 1;
2467 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2468 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2469 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2470 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2471 mr->hw_mr.mw_bind = false;
2472 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2473 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2474 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002475 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002476 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2477 mr->hw_mr.length = len;
2478 mr->hw_mr.vaddr = usr_addr;
2479 mr->hw_mr.zbva = false;
2480 mr->hw_mr.phy_mr = false;
2481 mr->hw_mr.dma_mr = false;
2482
2483 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2484 if (rc) {
2485 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2486 goto err2;
2487 }
2488
2489 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2490 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2491 mr->hw_mr.remote_atomic)
2492 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2493
2494 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2495 mr->ibmr.lkey);
2496 return &mr->ibmr;
2497
2498err2:
2499 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2500err1:
2501 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2502err0:
2503 kfree(mr);
2504 return ERR_PTR(rc);
2505}
2506
2507int qedr_dereg_mr(struct ib_mr *ib_mr)
2508{
2509 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2510 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2511 int rc = 0;
2512
2513 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2514 if (rc)
2515 return rc;
2516
2517 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2518
2519 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2520 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2521
2522 /* it could be user registered memory. */
2523 if (mr->umem)
2524 ib_umem_release(mr->umem);
2525
2526 kfree(mr);
2527
2528 return rc;
2529}
2530
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002531static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2532 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002533{
2534 struct qedr_pd *pd = get_qedr_pd(ibpd);
2535 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2536 struct qedr_mr *mr;
2537 int rc = -ENOMEM;
2538
2539 DP_DEBUG(dev, QEDR_MSG_MR,
2540 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2541 max_page_list_len);
2542
2543 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2544 if (!mr)
2545 return ERR_PTR(rc);
2546
2547 mr->dev = dev;
2548 mr->type = QEDR_MR_FRMR;
2549
2550 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2551 if (rc)
2552 goto err0;
2553
2554 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2555 if (rc) {
2556 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2557 goto err0;
2558 }
2559
2560 /* Index only, 18 bit long, lkey = itid << 8 | key */
2561 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2562 mr->hw_mr.key = 0;
2563 mr->hw_mr.pd = pd->pd_id;
2564 mr->hw_mr.local_read = 1;
2565 mr->hw_mr.local_write = 0;
2566 mr->hw_mr.remote_read = 0;
2567 mr->hw_mr.remote_write = 0;
2568 mr->hw_mr.remote_atomic = 0;
2569 mr->hw_mr.mw_bind = false;
2570 mr->hw_mr.pbl_ptr = 0;
2571 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2572 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2573 mr->hw_mr.fbo = 0;
2574 mr->hw_mr.length = 0;
2575 mr->hw_mr.vaddr = 0;
2576 mr->hw_mr.zbva = false;
2577 mr->hw_mr.phy_mr = true;
2578 mr->hw_mr.dma_mr = false;
2579
2580 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2581 if (rc) {
2582 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2583 goto err1;
2584 }
2585
2586 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2587 mr->ibmr.rkey = mr->ibmr.lkey;
2588
2589 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2590 return mr;
2591
2592err1:
2593 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2594err0:
2595 kfree(mr);
2596 return ERR_PTR(rc);
2597}
2598
2599struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2600 enum ib_mr_type mr_type, u32 max_num_sg)
2601{
2602 struct qedr_dev *dev;
2603 struct qedr_mr *mr;
2604
2605 if (mr_type != IB_MR_TYPE_MEM_REG)
2606 return ERR_PTR(-EINVAL);
2607
2608 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2609
2610 if (IS_ERR(mr))
2611 return ERR_PTR(-EINVAL);
2612
2613 dev = mr->dev;
2614
2615 return &mr->ibmr;
2616}
2617
2618static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2619{
2620 struct qedr_mr *mr = get_qedr_mr(ibmr);
2621 struct qedr_pbl *pbl_table;
2622 struct regpair *pbe;
2623 u32 pbes_in_page;
2624
2625 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
 2626		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2627 return -ENOMEM;
2628 }
2629
2630 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2631 mr->npages, addr);
2632
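	/* Locate the PBL page and the entry within it that correspond to
	 * mr->npages, then store the 64-bit address as a lo/hi pair.
	 */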
2633 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2634 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2635 pbe = (struct regpair *)pbl_table->va;
2636 pbe += mr->npages % pbes_in_page;
2637 pbe->lo = cpu_to_le32((u32)addr);
2638 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2639
2640 mr->npages++;
2641
2642 return 0;
2643}
2644
2645static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2646{
2647 int work = info->completed - info->completed_handled - 1;
2648
2649 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2650 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2651 struct qedr_pbl *pbl;
2652
 2653		/* Free all the page lists that can be freed
 2654		 * (all the ones that were invalidated), under the assumption
 2655		 * that if an FMR completed successfully, any invalidate
 2656		 * operation posted before it has also completed
2657 */
2658 pbl = list_first_entry(&info->inuse_pbl_list,
2659 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002660 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002661 info->completed_handled++;
2662 }
2663}
2664
2665int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2666 int sg_nents, unsigned int *sg_offset)
2667{
2668 struct qedr_mr *mr = get_qedr_mr(ibmr);
2669
2670 mr->npages = 0;
2671
2672 handle_completed_mrs(mr->dev, &mr->info);
2673 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2674}
2675
2676struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2677{
2678 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2679 struct qedr_pd *pd = get_qedr_pd(ibpd);
2680 struct qedr_mr *mr;
2681 int rc;
2682
2683 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2684 if (!mr)
2685 return ERR_PTR(-ENOMEM);
2686
2687 mr->type = QEDR_MR_DMA;
2688
2689 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2690 if (rc) {
2691 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2692 goto err1;
2693 }
2694
2695 /* index only, 18 bit long, lkey = itid << 8 | key */
2696 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2697 mr->hw_mr.pd = pd->pd_id;
2698 mr->hw_mr.local_read = 1;
2699 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2700 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2701 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2702 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2703 mr->hw_mr.dma_mr = true;
2704
2705 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2706 if (rc) {
2707 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2708 goto err2;
2709 }
2710
2711 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2712 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2713 mr->hw_mr.remote_atomic)
2714 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2715
2716 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2717 return &mr->ibmr;
2718
2719err2:
2720 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2721err1:
2722 kfree(mr);
2723 return ERR_PTR(rc);
2724}
Ram Amraniafa0e132016-10-10 13:15:36 +03002725
2726static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2727{
2728 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2729}
2730
2731static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2732{
2733 int i, len = 0;
2734
2735 for (i = 0; i < num_sge; i++)
2736 len += sg_list[i].length;
2737
2738 return len;
2739}
2740
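/* Byte-swap each 64-bit word of a WQE segment. Note that
 * cpu_to_be64(cpu_to_le64(x)) amounts to an unconditional 64-bit byte swap
 * regardless of host endianness.
 */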
2741static void swap_wqe_data64(u64 *p)
2742{
2743 int i;
2744
2745 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2746 *p = cpu_to_be64(cpu_to_le64(*p));
2747}
2748
2749static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2750 struct qedr_qp *qp, u8 *wqe_size,
2751 struct ib_send_wr *wr,
2752 struct ib_send_wr **bad_wr, u8 *bits,
2753 u8 bit)
2754{
2755 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2756 char *seg_prt, *wqe;
2757 int i, seg_siz;
2758
2759 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2760 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2761 *bad_wr = wr;
2762 return 0;
2763 }
2764
2765 if (!data_size)
2766 return data_size;
2767
2768 *bits |= bit;
2769
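	/* The inline payload is packed directly into successive SQ ring
	 * elements; every element that gets (partially) filled bumps
	 * *wqe_size.
	 */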
2770 seg_prt = NULL;
2771 wqe = NULL;
2772 seg_siz = 0;
2773
2774 /* Copy data inline */
2775 for (i = 0; i < wr->num_sge; i++) {
2776 u32 len = wr->sg_list[i].length;
2777 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2778
2779 while (len > 0) {
2780 u32 cur;
2781
2782 /* New segment required */
2783 if (!seg_siz) {
2784 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2785 seg_prt = wqe;
2786 seg_siz = sizeof(struct rdma_sq_common_wqe);
2787 (*wqe_size)++;
2788 }
2789
2790 /* Calculate currently allowed length */
2791 cur = min_t(u32, len, seg_siz);
2792 memcpy(seg_prt, src, cur);
2793
2794 /* Update segment variables */
2795 seg_prt += cur;
2796 seg_siz -= cur;
2797
2798 /* Update sge variables */
2799 src += cur;
2800 len -= cur;
2801
2802 /* Swap fully-completed segments */
2803 if (!seg_siz)
2804 swap_wqe_data64((u64 *)wqe);
2805 }
2806 }
2807
2808 /* swap last not completed segment */
2809 if (seg_siz)
2810 swap_wqe_data64((u64 *)wqe);
2811
2812 return data_size;
2813}
2814
2815#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2816 do { \
2817 DMA_REGPAIR_LE(sge->addr, vaddr); \
2818 (sge)->length = cpu_to_le32(vlength); \
2819 (sge)->flags = cpu_to_le32(vflags); \
2820 } while (0)
2821
2822#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2823 do { \
2824 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2825 (hdr)->num_sges = num_sge; \
2826 } while (0)
2827
2828#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2829 do { \
2830 DMA_REGPAIR_LE(sge->addr, vaddr); \
2831 (sge)->length = cpu_to_le32(vlength); \
2832 (sge)->l_key = cpu_to_le32(vlkey); \
2833 } while (0)
2834
2835static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2836 struct ib_send_wr *wr)
2837{
2838 u32 data_size = 0;
2839 int i;
2840
2841 for (i = 0; i < wr->num_sge; i++) {
2842 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2843
2844 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2845 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2846 sge->length = cpu_to_le32(wr->sg_list[i].length);
2847 data_size += wr->sg_list[i].length;
2848 }
2849
2850 if (wqe_size)
2851 *wqe_size += wr->num_sge;
2852
2853 return data_size;
2854}
2855
2856static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2857 struct qedr_qp *qp,
2858 struct rdma_sq_rdma_wqe_1st *rwqe,
2859 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2860 struct ib_send_wr *wr,
2861 struct ib_send_wr **bad_wr)
2862{
2863 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2864 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2865
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002866 if (wr->send_flags & IB_SEND_INLINE &&
2867 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2868 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002869 u8 flags = 0;
2870
2871 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2872 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2873 bad_wr, &rwqe->flags, flags);
2874 }
2875
2876 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2877}
2878
2879static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2880 struct qedr_qp *qp,
2881 struct rdma_sq_send_wqe_1st *swqe,
2882 struct rdma_sq_send_wqe_2st *swqe2,
2883 struct ib_send_wr *wr,
2884 struct ib_send_wr **bad_wr)
2885{
2886 memset(swqe2, 0, sizeof(*swqe2));
2887 if (wr->send_flags & IB_SEND_INLINE) {
2888 u8 flags = 0;
2889
2890 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2891 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2892 bad_wr, &swqe->flags, flags);
2893 }
2894
2895 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2896}
2897
2898static int qedr_prepare_reg(struct qedr_qp *qp,
2899 struct rdma_sq_fmr_wqe_1st *fwqe1,
2900 struct ib_reg_wr *wr)
2901{
2902 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2903 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2904
2905 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2906 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2907 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2908 fwqe1->l_key = wr->key;
2909
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002910 fwqe2->access_ctrl = 0;
2911
Ram Amraniafa0e132016-10-10 13:15:36 +03002912 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2913 !!(wr->access & IB_ACCESS_REMOTE_READ));
2914 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2915 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2916 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2917 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2918 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2919 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2920 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2921 fwqe2->fmr_ctrl = 0;
2922
2923 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2924 ilog2(mr->ibmr.page_size) - 12);
2925
2926 fwqe2->length_hi = 0;
2927 fwqe2->length_lo = mr->ibmr.length;
2928 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2929 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2930
2931 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2932
2933 return 0;
2934}
2935
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002936static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002937{
2938 switch (opcode) {
2939 case IB_WR_RDMA_WRITE:
2940 case IB_WR_RDMA_WRITE_WITH_IMM:
2941 return IB_WC_RDMA_WRITE;
2942 case IB_WR_SEND_WITH_IMM:
2943 case IB_WR_SEND:
2944 case IB_WR_SEND_WITH_INV:
2945 return IB_WC_SEND;
2946 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002947 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002948 return IB_WC_RDMA_READ;
2949 case IB_WR_ATOMIC_CMP_AND_SWP:
2950 return IB_WC_COMP_SWAP;
2951 case IB_WR_ATOMIC_FETCH_AND_ADD:
2952 return IB_WC_FETCH_ADD;
2953 case IB_WR_REG_MR:
2954 return IB_WC_REG_MR;
2955 case IB_WR_LOCAL_INV:
2956 return IB_WC_LOCAL_INV;
2957 default:
2958 return IB_WC_SEND;
2959 }
2960}
2961
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002962static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002963{
2964 int wq_is_full, err_wr, pbl_is_full;
2965 struct qedr_dev *dev = qp->dev;
2966
2967 /* prevent SQ overflow and/or processing of a bad WR */
2968 err_wr = wr->num_sge > qp->sq.max_sges;
2969 wq_is_full = qedr_wq_is_full(&qp->sq);
2970 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2971 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2972 if (wq_is_full || err_wr || pbl_is_full) {
2973 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2974 DP_ERR(dev,
2975 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2976 qp);
2977 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2978 }
2979
2980 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2981 DP_ERR(dev,
2982 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2983 qp);
2984 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2985 }
2986
2987 if (pbl_is_full &&
2988 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2989 DP_ERR(dev,
2990 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2991 qp);
2992 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2993 }
2994 return false;
2995 }
2996 return true;
2997}
2998
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002999static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03003000 struct ib_send_wr **bad_wr)
3001{
3002 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3003 struct qedr_qp *qp = get_qedr_qp(ibqp);
3004 struct rdma_sq_atomic_wqe_1st *awqe1;
3005 struct rdma_sq_atomic_wqe_2nd *awqe2;
3006 struct rdma_sq_atomic_wqe_3rd *awqe3;
3007 struct rdma_sq_send_wqe_2st *swqe2;
3008 struct rdma_sq_local_inv_wqe *iwqe;
3009 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3010 struct rdma_sq_send_wqe_1st *swqe;
3011 struct rdma_sq_rdma_wqe_1st *rwqe;
3012 struct rdma_sq_fmr_wqe_1st *fwqe1;
3013 struct rdma_sq_common_wqe *wqe;
3014 u32 length;
3015 int rc = 0;
3016 bool comp;
3017
3018 if (!qedr_can_post_send(qp, wr)) {
3019 *bad_wr = wr;
3020 return -ENOMEM;
3021 }
3022
3023 wqe = qed_chain_produce(&qp->sq.pbl);
3024 qp->wqe_wr_id[qp->sq.prod].signaled =
3025 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3026
3027 wqe->flags = 0;
3028 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3029 !!(wr->send_flags & IB_SEND_SOLICITED));
3030 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3031 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3032 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3033 !!(wr->send_flags & IB_SEND_FENCE));
3034 wqe->prev_wqe_size = qp->prev_wqe_size;
3035
3036 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3037
3038 switch (wr->opcode) {
3039 case IB_WR_SEND_WITH_IMM:
3040 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3041 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3042 swqe->wqe_size = 2;
3043 swqe2 = qed_chain_produce(&qp->sq.pbl);
3044
3045 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
3046 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3047 wr, bad_wr);
3048 swqe->length = cpu_to_le32(length);
3049 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3050 qp->prev_wqe_size = swqe->wqe_size;
3051 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3052 break;
3053 case IB_WR_SEND:
3054 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3055 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3056
3057 swqe->wqe_size = 2;
3058 swqe2 = qed_chain_produce(&qp->sq.pbl);
3059 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3060 wr, bad_wr);
3061 swqe->length = cpu_to_le32(length);
3062 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3063 qp->prev_wqe_size = swqe->wqe_size;
3064 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3065 break;
3066 case IB_WR_SEND_WITH_INV:
3067 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3068 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3069 swqe2 = qed_chain_produce(&qp->sq.pbl);
3070 swqe->wqe_size = 2;
3071 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3072 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3073 wr, bad_wr);
3074 swqe->length = cpu_to_le32(length);
3075 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3076 qp->prev_wqe_size = swqe->wqe_size;
3077 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3078 break;
3079
3080 case IB_WR_RDMA_WRITE_WITH_IMM:
3081 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3082 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3083
3084 rwqe->wqe_size = 2;
3085 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3086 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3087 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3088 wr, bad_wr);
3089 rwqe->length = cpu_to_le32(length);
3090 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3091 qp->prev_wqe_size = rwqe->wqe_size;
3092 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3093 break;
3094 case IB_WR_RDMA_WRITE:
3095 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3096 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3097
3098 rwqe->wqe_size = 2;
3099 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3100 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3101 wr, bad_wr);
3102 rwqe->length = cpu_to_le32(length);
3103 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3104 qp->prev_wqe_size = rwqe->wqe_size;
3105 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3106 break;
3107 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003108 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
 3109		/* fallthrough: handled identically to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003110
3111 case IB_WR_RDMA_READ:
3112 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3113 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3114
3115 rwqe->wqe_size = 2;
3116 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3117 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3118 wr, bad_wr);
3119 rwqe->length = cpu_to_le32(length);
3120 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3121 qp->prev_wqe_size = rwqe->wqe_size;
3122 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3123 break;
3124
3125 case IB_WR_ATOMIC_CMP_AND_SWP:
3126 case IB_WR_ATOMIC_FETCH_AND_ADD:
3127 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3128 awqe1->wqe_size = 4;
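		/* An atomic WR occupies four SQ elements: the three atomic
		 * WQE segments built here plus the local data SGE.
		 */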
3129
3130 awqe2 = qed_chain_produce(&qp->sq.pbl);
3131 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3132 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3133
3134 awqe3 = qed_chain_produce(&qp->sq.pbl);
3135
3136 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3137 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3138 DMA_REGPAIR_LE(awqe3->swap_data,
3139 atomic_wr(wr)->compare_add);
3140 } else {
3141 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3142 DMA_REGPAIR_LE(awqe3->swap_data,
3143 atomic_wr(wr)->swap);
3144 DMA_REGPAIR_LE(awqe3->cmp_data,
3145 atomic_wr(wr)->compare_add);
3146 }
3147
3148 qedr_prepare_sq_sges(qp, NULL, wr);
3149
3150 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3151 qp->prev_wqe_size = awqe1->wqe_size;
3152 break;
3153
3154 case IB_WR_LOCAL_INV:
3155 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3156 iwqe->wqe_size = 1;
3157
3158 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3159 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3160 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3161 qp->prev_wqe_size = iwqe->wqe_size;
3162 break;
3163 case IB_WR_REG_MR:
3164 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3165 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3166 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3167 fwqe1->wqe_size = 2;
3168
3169 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3170 if (rc) {
3171 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3172 *bad_wr = wr;
3173 break;
3174 }
3175
3176 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3177 qp->prev_wqe_size = fwqe1->wqe_size;
3178 break;
3179 default:
3180 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3181 rc = -EINVAL;
3182 *bad_wr = wr;
3183 break;
3184 }
3185
3186 if (*bad_wr) {
3187 u16 value;
3188
3189 /* Restore prod to its position before
3190 * this WR was processed
3191 */
3192 value = le16_to_cpu(qp->sq.db_data.data.value);
3193 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3194
3195 /* Restore prev_wqe_size */
3196 qp->prev_wqe_size = wqe->prev_wqe_size;
3197 rc = -EINVAL;
3198 DP_ERR(dev, "POST SEND FAILED\n");
3199 }
3200
3201 return rc;
3202}
3203
3204int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3205 struct ib_send_wr **bad_wr)
3206{
3207 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3208 struct qedr_qp *qp = get_qedr_qp(ibqp);
3209 unsigned long flags;
3210 int rc = 0;
3211
3212 *bad_wr = NULL;
3213
Ram Amrani04886772016-10-10 13:15:38 +03003214 if (qp->qp_type == IB_QPT_GSI)
3215 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3216
Ram Amraniafa0e132016-10-10 13:15:36 +03003217 spin_lock_irqsave(&qp->q_lock, flags);
3218
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003219 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3220 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3221 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3222 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3223 spin_unlock_irqrestore(&qp->q_lock, flags);
3224 *bad_wr = wr;
3225 DP_DEBUG(dev, QEDR_MSG_CQ,
3226 "QP in wrong state! QP icid=0x%x state %d\n",
3227 qp->icid, qp->state);
3228 return -EINVAL;
3229 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003230 }
3231
Ram Amraniafa0e132016-10-10 13:15:36 +03003232 while (wr) {
3233 rc = __qedr_post_send(ibqp, wr, bad_wr);
3234 if (rc)
3235 break;
3236
3237 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3238
3239 qedr_inc_sw_prod(&qp->sq);
3240
3241 qp->sq.db_data.data.value++;
3242
3243 wr = wr->next;
3244 }
3245
3246 /* Trigger doorbell
3247 * If there was a failure in the first WR then it will be triggered in
 3248	 * vain. However, this is not harmful (as long as the producer value is
3249 * unchanged). For performance reasons we avoid checking for this
3250 * redundant doorbell.
3251 */
3252 wmb();
3253 writel(qp->sq.db_data.raw, qp->sq.db);
3254
3255 /* Make sure write sticks */
3256 mmiowb();
3257
3258 spin_unlock_irqrestore(&qp->q_lock, flags);
3259
3260 return rc;
3261}
3262
3263int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3264 struct ib_recv_wr **bad_wr)
3265{
3266 struct qedr_qp *qp = get_qedr_qp(ibqp);
3267 struct qedr_dev *dev = qp->dev;
3268 unsigned long flags;
3269 int status = 0;
3270
Ram Amrani04886772016-10-10 13:15:38 +03003271 if (qp->qp_type == IB_QPT_GSI)
3272 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3273
Ram Amraniafa0e132016-10-10 13:15:36 +03003274 spin_lock_irqsave(&qp->q_lock, flags);
3275
Amrani, Ram922d9a42016-12-22 14:40:38 +02003276 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003277 spin_unlock_irqrestore(&qp->q_lock, flags);
3278 *bad_wr = wr;
3279 return -EINVAL;
3280 }
3281
3282 while (wr) {
3283 int i;
3284
3285 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3286 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3287 wr->num_sge > qp->rq.max_sges) {
3288 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3289 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3290 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3291 qp->rq.max_sges);
3292 status = -ENOMEM;
3293 *bad_wr = wr;
3294 break;
3295 }
3296 for (i = 0; i < wr->num_sge; i++) {
3297 u32 flags = 0;
3298 struct rdma_rq_sge *rqe =
3299 qed_chain_produce(&qp->rq.pbl);
3300
3301 /* First one must include the number
3302 * of SGE in the list
3303 */
3304 if (!i)
3305 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3306 wr->num_sge);
3307
3308 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3309 wr->sg_list[i].lkey);
3310
3311 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3312 wr->sg_list[i].length, flags);
3313 }
3314
 3315		/* Special case of no SGEs. FW requires between 1 and 4 SGEs, so
 3316		 * here we post a single SGE with length zero. This is needed
 3317		 * because an RDMA write with immediate consumes an RQ entry.
 3318		 */
3319 if (!wr->num_sge) {
3320 u32 flags = 0;
3321 struct rdma_rq_sge *rqe =
3322 qed_chain_produce(&qp->rq.pbl);
3323
3324 /* First one must include the number
3325 * of SGE in the list
3326 */
3327 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3328 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3329
3330 RQ_SGE_SET(rqe, 0, 0, flags);
3331 i = 1;
3332 }
3333
3334 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3335 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3336
3337 qedr_inc_sw_prod(&qp->rq);
3338
3339 /* Flush all the writes before signalling doorbell */
3340 wmb();
3341
3342 qp->rq.db_data.data.value++;
3343
3344 writel(qp->rq.db_data.raw, qp->rq.db);
3345
3346 /* Make sure write sticks */
3347 mmiowb();
3348
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003349 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3350 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3351 mmiowb(); /* for second doorbell */
3352 }
3353
Ram Amraniafa0e132016-10-10 13:15:36 +03003354 wr = wr->next;
3355 }
3356
3357 spin_unlock_irqrestore(&qp->q_lock, flags);
3358
3359 return status;
3360}
3361
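/* A CQE is owned by the driver (i.e. still needs processing) when its
 * TOGGLE bit matches the CQ's current pbl_toggle value.
 */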
3362static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3363{
3364 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3365
3366 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3367 cq->pbl_toggle;
3368}
3369
3370static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3371{
3372 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3373 struct qedr_qp *qp;
3374
3375 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3376 resp_cqe->qp_handle.lo,
3377 u64);
3378 return qp;
3379}
3380
3381static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3382{
3383 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3384
3385 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3386}
3387
3388/* Return latest CQE (needs processing) */
3389static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3390{
3391 return cq->latest_cqe;
3392}
3393
 3394/* For FMR we need to increment the FMR-completed counter used by the
 3395 * algorithm that determines whether a PBL can be freed or not.
 3396 * We need to do this whether or not the work request was signaled. For
 3397 * this purpose we call this function from the condition that checks if a WR
 3398 * should be skipped, to make sure we don't miss it (possibly this FMR
 3399 * operation was not signaled).
3400 */
3401static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3402{
3403 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3404 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3405}
3406
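/* Reap up to num_entries SQ completions, up to hw_cons. Unsignaled WRs are
 * normally skipped; with 'force' set they are reported as well (used when
 * flushing the SQ with an error status).
 */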
3407static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3408 struct qedr_cq *cq, int num_entries,
3409 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3410 int force)
3411{
3412 u16 cnt = 0;
3413
3414 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3415 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3416 qedr_chk_if_fmr(qp);
3417 /* skip WC */
3418 goto next_cqe;
3419 }
3420
3421 /* fill WC */
3422 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003423 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003424 wc->wc_flags = 0;
3425 wc->src_qp = qp->id;
3426 wc->qp = &qp->ibqp;
3427
3428 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3429 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3430
3431 switch (wc->opcode) {
3432 case IB_WC_RDMA_WRITE:
3433 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3434 break;
3435 case IB_WC_COMP_SWAP:
3436 case IB_WC_FETCH_ADD:
3437 wc->byte_len = 8;
3438 break;
3439 case IB_WC_REG_MR:
3440 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3441 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003442 case IB_WC_RDMA_READ:
3443 case IB_WC_SEND:
3444 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3445 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003446 default:
3447 break;
3448 }
3449
3450 num_entries--;
3451 wc++;
3452 cnt++;
3453next_cqe:
3454 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3455 qed_chain_consume(&qp->sq.pbl);
3456 qedr_inc_sw_cons(&qp->sq);
3457 }
3458
3459 return cnt;
3460}
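
/* Caller-side sketch of the signaled/unsignaled split handled above: only WRs
 * posted with IB_SEND_SIGNALED generate their own completion; the rest are
 * retired implicitly when a later signaled WR on the same SQ completes.  The
 * signaling policy and arguments are this example's assumptions.
 */
static int example_post_send(struct ib_qp *qp, struct ib_sge *sge, u64 wr_id,
			     bool signal)
{
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = signal ? IB_SEND_SIGNALED : 0,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}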
3461
3462static int qedr_poll_cq_req(struct qedr_dev *dev,
3463 struct qedr_qp *qp, struct qedr_cq *cq,
3464 int num_entries, struct ib_wc *wc,
3465 struct rdma_cqe_requester *req)
3466{
3467 int cnt = 0;
3468
3469 switch (req->status) {
3470 case RDMA_CQE_REQ_STS_OK:
3471 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3472 IB_WC_SUCCESS, 0);
3473 break;
3474 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003475 if (qp->state != QED_ROCE_QP_STATE_ERR)
3476 DP_ERR(dev,
3477 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3478 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003479 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003480 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003481 break;
3482 default:
3483 		/* Process all WQEs before the consumer index */
3484 qp->state = QED_ROCE_QP_STATE_ERR;
3485 cnt = process_req(dev, qp, cq, num_entries, wc,
3486 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3487 wc += cnt;
3488 		/* If there is room for an extra WC, fill it with the actual error info */
3489 if (cnt < num_entries) {
3490 enum ib_wc_status wc_status;
3491
3492 switch (req->status) {
3493 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3494 DP_ERR(dev,
3495 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3496 cq->icid, qp->icid);
3497 wc_status = IB_WC_BAD_RESP_ERR;
3498 break;
3499 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3500 DP_ERR(dev,
3501 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3502 cq->icid, qp->icid);
3503 wc_status = IB_WC_LOC_LEN_ERR;
3504 break;
3505 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3506 DP_ERR(dev,
3507 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3508 cq->icid, qp->icid);
3509 wc_status = IB_WC_LOC_QP_OP_ERR;
3510 break;
3511 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3512 DP_ERR(dev,
3513 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3514 cq->icid, qp->icid);
3515 wc_status = IB_WC_LOC_PROT_ERR;
3516 break;
3517 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3518 DP_ERR(dev,
3519 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3520 cq->icid, qp->icid);
3521 wc_status = IB_WC_MW_BIND_ERR;
3522 break;
3523 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3524 DP_ERR(dev,
3525 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3526 cq->icid, qp->icid);
3527 wc_status = IB_WC_REM_INV_REQ_ERR;
3528 break;
3529 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3530 DP_ERR(dev,
3531 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3532 cq->icid, qp->icid);
3533 wc_status = IB_WC_REM_ACCESS_ERR;
3534 break;
3535 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3536 DP_ERR(dev,
3537 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3538 cq->icid, qp->icid);
3539 wc_status = IB_WC_REM_OP_ERR;
3540 break;
3541 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3542 DP_ERR(dev,
3543 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3544 cq->icid, qp->icid);
3545 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3546 break;
3547 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3548 DP_ERR(dev,
3549 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3550 cq->icid, qp->icid);
3551 wc_status = IB_WC_RETRY_EXC_ERR;
3552 break;
3553 default:
3554 DP_ERR(dev,
3555 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3556 cq->icid, qp->icid);
3557 wc_status = IB_WC_GENERAL_ERR;
3558 }
3559 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3560 wc_status, 1);
3561 }
3562 }
3563
3564 return cnt;
3565}
3566
Amrani, Ramb6acd712017-04-27 13:35:35 +03003567static inline int qedr_cqe_resp_status_to_ib(u8 status)
3568{
3569 switch (status) {
3570 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3571 return IB_WC_LOC_ACCESS_ERR;
3572 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3573 return IB_WC_LOC_LEN_ERR;
3574 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3575 return IB_WC_LOC_QP_OP_ERR;
3576 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3577 return IB_WC_LOC_PROT_ERR;
3578 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3579 return IB_WC_MW_BIND_ERR;
3580 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3581 return IB_WC_REM_INV_RD_REQ_ERR;
3582 case RDMA_CQE_RESP_STS_OK:
3583 return IB_WC_SUCCESS;
3584 default:
3585 return IB_WC_GENERAL_ERR;
3586 }
3587}
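
/* Sketch of how a ULP typically reacts to the translated status: anything
 * other than IB_WC_SUCCESS (or the flush errors expected during teardown) is
 * logged through ib_wc_status_msg().  The logging policy is this example's.
 */
static void example_check_wc_status(const struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_warn("completion failed: %s (wr_id 0x%llx)\n",
			ib_wc_status_msg(wc->status), wc->wr_id);
}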
3588
3589static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3590 struct ib_wc *wc)
3591{
3592 wc->status = IB_WC_SUCCESS;
3593 wc->byte_len = le32_to_cpu(resp->length);
3594
3595 if (resp->flags & QEDR_RESP_IMM) {
3596 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3597 wc->wc_flags |= IB_WC_WITH_IMM;
3598
3599 if (resp->flags & QEDR_RESP_RDMA)
3600 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3601
3602 if (resp->flags & QEDR_RESP_INV)
3603 return -EINVAL;
3604
3605 } else if (resp->flags & QEDR_RESP_INV) {
3606 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3607 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3608
3609 if (resp->flags & QEDR_RESP_RDMA)
3610 return -EINVAL;
3611
3612 } else if (resp->flags & QEDR_RESP_RDMA) {
3613 return -EINVAL;
3614 }
3615
3616 return 0;
3617}
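
/* Receiver-side sketch of consuming the flags set above: immediate data and
 * an invalidated rkey arrive through the same union in struct ib_wc and are
 * distinguished only by wc_flags.  The pr_info() handlers are placeholders.
 */
static void example_handle_recv_wc(const struct ib_wc *wc)
{
	if (wc->wc_flags & IB_WC_WITH_IMM)
		pr_info("recv%s with immediate 0x%x\n",
			wc->opcode == IB_WC_RECV_RDMA_WITH_IMM ?
			" (RDMA write)" : "",
			be32_to_cpu(wc->ex.imm_data));
	else if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
		pr_info("recv with invalidated rkey 0x%x\n",
			wc->ex.invalidate_rkey);
}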
3618
Ram Amraniafa0e132016-10-10 13:15:36 +03003619static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3620 struct qedr_cq *cq, struct ib_wc *wc,
3621 struct rdma_cqe_responder *resp, u64 wr_id)
3622{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003623 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003624 wc->opcode = IB_WC_RECV;
3625 wc->wc_flags = 0;
3626
Amrani, Ramb6acd712017-04-27 13:35:35 +03003627 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3628 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3629 DP_ERR(dev,
3630 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3631 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003632
Amrani, Ramb6acd712017-04-27 13:35:35 +03003633 } else {
3634 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3635 if (wc->status == IB_WC_GENERAL_ERR)
3636 DP_ERR(dev,
3637 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3638 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003639 }
3640
Amrani, Ramb6acd712017-04-27 13:35:35 +03003641 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003642 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003643 wc->src_qp = qp->id;
3644 wc->qp = &qp->ibqp;
3645 wc->wr_id = wr_id;
3646}
3647
3648static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3649 struct qedr_cq *cq, struct ib_wc *wc,
3650 struct rdma_cqe_responder *resp)
3651{
3652 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3653
3654 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3655
3656 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3657 qed_chain_consume(&qp->rq.pbl);
3658 qedr_inc_sw_cons(&qp->rq);
3659
3660 return 1;
3661}
3662
3663static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3664 int num_entries, struct ib_wc *wc, u16 hw_cons)
3665{
3666 u16 cnt = 0;
3667
3668 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3669 /* fill WC */
3670 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003671 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003672 wc->wc_flags = 0;
3673 wc->src_qp = qp->id;
3674 wc->byte_len = 0;
3675 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3676 wc->qp = &qp->ibqp;
3677 num_entries--;
3678 wc++;
3679 cnt++;
3680 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3681 qed_chain_consume(&qp->rq.pbl);
3682 qedr_inc_sw_cons(&qp->rq);
3683 }
3684
3685 return cnt;
3686}
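
/* Teardown-side sketch matching the flush handling above: once the QP is in
 * the error state, every outstanding receive completes with
 * IB_WC_WR_FLUSH_ERR and must still be polled out (in-tree ULPs normally use
 * ib_drain_qp() for this).  Error handling is trimmed for brevity.
 */
static void example_flush_and_drain_rq(struct ib_qp *qp, struct ib_cq *cq,
				       int outstanding)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_wc wc;

	ib_modify_qp(qp, &attr, IB_QP_STATE);

	while (outstanding > 0)
		if (ib_poll_cq(cq, 1, &wc) == 1 &&
		    wc.status == IB_WC_WR_FLUSH_ERR)
			outstanding--;
}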
3687
3688static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3689 struct rdma_cqe_responder *resp, int *update)
3690{
3691 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3692 consume_cqe(cq);
3693 *update |= 1;
3694 }
3695}
3696
3697static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3698 struct qedr_cq *cq, int num_entries,
3699 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3700 int *update)
3701{
3702 int cnt;
3703
3704 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3705 cnt = process_resp_flush(qp, cq, num_entries, wc,
3706 resp->rq_cons);
3707 try_consume_resp_cqe(cq, qp, resp, update);
3708 } else {
3709 cnt = process_resp_one(dev, qp, cq, wc, resp);
3710 consume_cqe(cq);
3711 *update |= 1;
3712 }
3713
3714 return cnt;
3715}
3716
3717static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3718 struct rdma_cqe_requester *req, int *update)
3719{
3720 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3721 consume_cqe(cq);
3722 *update |= 1;
3723 }
3724}
3725
3726int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3727{
3728 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3729 struct qedr_cq *cq = get_qedr_cq(ibcq);
3730 union rdma_cqe *cqe = cq->latest_cqe;
3731 u32 old_cons, new_cons;
3732 unsigned long flags;
3733 int update = 0;
3734 int done = 0;
3735
Amrani, Ram4dd72632017-04-27 13:35:34 +03003736 if (cq->destroyed) {
3737 DP_ERR(dev,
3738 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3739 cq, cq->icid);
3740 return 0;
3741 }
3742
Ram Amrani04886772016-10-10 13:15:38 +03003743 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3744 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3745
Ram Amraniafa0e132016-10-10 13:15:36 +03003746 spin_lock_irqsave(&cq->cq_lock, flags);
3747 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3748 while (num_entries && is_valid_cqe(cq, cqe)) {
3749 struct qedr_qp *qp;
3750 int cnt = 0;
3751
3752 /* prevent speculative reads of any field of CQE */
3753 rmb();
3754
3755 qp = cqe_get_qp(cqe);
3756 if (!qp) {
3757 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3758 break;
3759 }
3760
3761 wc->qp = &qp->ibqp;
3762
3763 switch (cqe_get_type(cqe)) {
3764 case RDMA_CQE_TYPE_REQUESTER:
3765 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3766 &cqe->req);
3767 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3768 break;
3769 case RDMA_CQE_TYPE_RESPONDER_RQ:
3770 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3771 &cqe->resp, &update);
3772 break;
3773 case RDMA_CQE_TYPE_INVALID:
3774 default:
3775 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3776 cqe_get_type(cqe));
3777 }
3778 num_entries -= cnt;
3779 wc += cnt;
3780 done += cnt;
3781
3782 cqe = get_cqe(cq);
3783 }
3784 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3785
3786 cq->cq_cons += new_cons - old_cons;
3787
3788 if (update)
3789 		/* The doorbell notifies about the latest VALID entry,
3790 		 * but the chain already points to the next INVALID one
3791 */
3792 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3793
3794 spin_unlock_irqrestore(&cq->cq_lock, flags);
3795 return done;
3796}
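
/* Caller-side sketch of the poll interface implemented by qedr_poll_cq():
 * ib_poll_cq() returns how many of the caller's WC slots were filled, so it
 * is normally called in a loop until it returns fewer than the batch size.
 * The batch size of 16 and the pr_debug() handler are this example's choices.
 */
static void example_poll_loop(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	do {
		n = ib_poll_cq(cq, 16, wc);
		for (i = 0; i < n; i++)
			pr_debug("wr_id 0x%llx status %d opcode %d\n",
				 wc[i].wr_id, wc[i].status, wc[i].opcode);
	} while (n == 16);
}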
Ram Amrani993d1b52016-10-10 13:15:39 +03003797
3798int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3799 u8 port_num,
3800 const struct ib_wc *in_wc,
3801 const struct ib_grh *in_grh,
3802 const struct ib_mad_hdr *mad_hdr,
3803 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3804 size_t *out_mad_size, u16 *out_mad_pkey_index)
3805{
3806 struct qedr_dev *dev = get_qedr_dev(ibdev);
3807
3808 DP_DEBUG(dev, QEDR_MSG_GSI,
3809 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3810 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3811 mad_hdr->class_specific, mad_hdr->class_version,
3812 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3813 return IB_MAD_RESULT_SUCCESS;
3814}