/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

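/* Copy a response structure to user space, truncating it to the length the
 * consumer advertised in udata->outlen so that user libraries with a
 * shorter response layout are not overflowed.
 */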
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

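/* Report device capabilities to the IB midlayer, translated from the
 * limits cached in dev->attr (as obtained from the qed core driver).
 */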
int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

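/* Translate an Ethernet link speed (in Mbps) into the closest matching
 * IB speed/width pair; unknown speeds fall back to SDR x1.
 */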
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

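/* Report per-port attributes. RoCE ports expose the full GID and P_Key
 * table sizes, while iWARP ports expose a single entry of each.
 */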
int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr is zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->gid_tbl_len = 1;
		attr->pkey_tbl_len = 1;
	} else {
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

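/* Doorbell and chain areas that user space may mmap() are tracked per
 * ucontext in a list keyed by physical address and length; qedr_mmap()
 * later refuses any range that was not registered through qedr_add_mmap().
 */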
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);

	return found;
}

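/* Allocate a user context: reserve a DPI (doorbell page) from the qed
 * core, describe it to user space through the udata response, and register
 * the doorbell range so the user library can mmap() it later.
 */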
struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

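/* Back a user mmap() request with either the doorbell BAR (write-only,
 * mapped write-combined) or a previously registered chain area. Only
 * ranges recorded via qedr_add_mmap() are accepted.
 */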
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		uresp.pd_id = pd_id;

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

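/* Allocate the DMA-coherent pages that make up a PBL (page buffer list).
 * For a two-layer PBL the first page is filled with the physical addresses
 * of the remaining pages.
 */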
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	kfree(pbl_table);

	return ERR_PTR(-ENOMEM);
}

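/* Work out how many PBL pages of which size are needed to describe
 * num_pbes page entries, switching to a two-layer layout when a single
 * page cannot hold them all and the caller allows it.
 */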
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 (points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

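/* Walk the umem scatterlist and write one little-endian PBE per firmware
 * page (pg_shift) into the PBL pages, advancing to the next PBL page
 * whenever the current one fills up.
 */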
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

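/* Pin a user-space queue buffer, build the PBL that describes it in units
 * of firmware pages (FW_PAGE_SHIFT) and keep the umem around for the
 * lifetime of the queue.
 */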
static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_page_count(q->umem) <<
	    (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
	if (IS_ERR(q->pbl_tbl)) {
		rc = PTR_ERR(q->pbl_tbl);
		goto err0;
	}

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
			   FW_PAGE_SHIFT);

	return 0;

err0:
	ib_umem_release(q->umem);

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

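/* Write the aggregated CQ doorbell: update the consumer index and arm
 * flags, then ring the 64-bit doorbell register. The barriers keep the
 * doorbell write ordered after the CQE updates and flushed to the device.
 */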
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

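/* Create a completion queue. For user CQs the CQE ring is supplied by the
 * consumer and described to firmware through a PBL; for kernel CQs the
 * ring is allocated here as a qed chain and the doorbell is set up locally.
 */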
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl, NULL);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

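/* How long qedr_destroy_cq() waits for outstanding CNQ notifications:
 * up to 10 iterations of a 10us busy-wait, then up to 10 iterations of
 * a 10ms sleep.
 */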
#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		goto done;

	iparams.icid = cq->icid;
	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		return rc;

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	if (oparams.num_cq_notif != cq->cnq_notif)
		goto err;

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
done:
	cq->sig = ~cq->sig;

	kfree(cq);

	return 0;

err:
	DP_ERR(dev,
	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);

	return -EINVAL;
}

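/* Resolve the source/destination GIDs, RoCE version (v1, v2/IPv4, v2/IPv6)
 * and VLAN for a modify-QP request from the cached GID table entry
 * referenced by the AH attribute.
 */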
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device,
			       rdma_ah_get_port_num(&attr->ah_attr),
			       grh->sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(grh->dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(&uresp, qp);
	qedr_copy_rq_uresp(&uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (qp->usq.umem)
		ib_umem_release(qp->usq.umem);
	qp->usq.umem = NULL;

	if (qp->urq.umem)
		ib_umem_release(qp->urq.umem);
	qp->urq.umem = NULL;
}

static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;
	int rc = -EINVAL;

	ib_ctx = ibpd->uobject->context;
	ctx = get_qedr_ucontext(ib_ctx);

	memset(&ureq, 0, sizeof(ureq));
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return rc;
	}

	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
				  ureq.sq_len, 0, 0);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
				  ureq.rq_len, 0, 0);

	if (rc)
		return rc;

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;
	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
	in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	rc = qedr_copy_qp_uresp(dev, qp, udata);
	if (rc)
		goto err;

	qedr_qp_user_print(dev, qp);

	return 0;
err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, qp);
	return rc;
}

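/* Set up the iWARP doorbell addresses: SQ producer, RQ producer, and the
 * second RQ doorbell (TCM flags) that issues the post-RQ command.
 */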
static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
}

static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	int rc;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, NULL);

	if (rc)
		return rc;

	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, NULL);
	if (rc)
		return rc;

	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_roce_db_info(dev, qp);
	return rc;
}

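/* For iWARP the qed core allocates the QP (and its PBL memory) first; the
 * SQ/RQ chains are then built on top of the externally provided PBLs
 * returned in out_params. This is the reverse of the RoCE flow above.
 */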
static int
qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_in_params *in_params,
			    u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_chain_ext_pbl ext_pbl;
	int rc;

	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
						     QEDR_SQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);
	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
						     QEDR_RQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);

	if (!qp->qed_qp)
		return -EINVAL;

	/* Now we allocate the chain */
	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, &ext_pbl);

	if (rc)
		goto err;

	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, &ext_pbl);

	if (rc)
		goto err;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_iwarp_db_info(dev, qp);
	return rc;

err:
	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);

	return rc;
}

static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static int qedr_create_kernel_qp(struct qedr_dev *dev,
				 struct qedr_qp *qp,
				 struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	int rc = -EINVAL;
	u32 n_rq_elems;
	u32 n_sq_elems;
	u32 n_sq_entries;

	memset(&in_params, 0, sizeof(in_params));

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 * We should add an extra WR since the prod and cons indices of
	 * wqe_wr_id are managed in such a way that the WQ is considered full
	 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far
	 * more WRs than indicated. If we decline its ib_post_send() then we
	 * get error prints in the dmesg we'd like to avoid.
	 */
	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
			      dev->attr.max_sqe);

	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
		return -ENOMEM;
	}

	/* QP handle to be written in CQE */
	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);

	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		DP_ERR(dev,
		       "create qp: failed RQ shadow memory allocation\n");
		kfree(qp->wqe_wr_id);
		return -ENOMEM;
	}

	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);

	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;

	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
						 n_sq_elems, n_rq_elems);
	else
		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
						n_sq_elems, n_rq_elems);
	if (rc)
		qedr_cleanup_kernel(dev, qp);

	return rc;
}

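/* Top-level create-QP verb: validate the requested capabilities, hand GSI
 * QPs to the dedicated GSI path and otherwise build either a user-space
 * or a kernel QP.
 */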
1650struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1651 struct ib_qp_init_attr *attrs,
1652 struct ib_udata *udata)
1653{
1654 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001655 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001656 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001657 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001658 int rc = 0;
1659
1660 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1661 udata ? "user library" : "kernel", pd);
1662
1663 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1664 if (rc)
1665 return ERR_PTR(rc);
1666
Wei Yongjun181d8012016-10-28 16:33:47 +00001667 if (attrs->srq)
1668 return ERR_PTR(-EINVAL);
1669
Ram Amranicecbcdd2016-10-10 13:15:34 +03001670 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001671 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1672 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001673 get_qedr_cq(attrs->send_cq),
1674 get_qedr_cq(attrs->send_cq)->icid,
1675 get_qedr_cq(attrs->recv_cq),
1676 get_qedr_cq(attrs->recv_cq)->icid);
1677
Amrani, Ramdf158562016-12-22 14:52:24 +02001678 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1679 if (!qp) {
1680 DP_ERR(dev, "create qp: failed allocating memory\n");
1681 return ERR_PTR(-ENOMEM);
1682 }
1683
1684 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001685
Ram Amrani04886772016-10-10 13:15:38 +03001686 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001687 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1688 if (IS_ERR(ibqp))
1689 kfree(qp);
1690 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001691 }
1692
Amrani, Ramdf158562016-12-22 14:52:24 +02001693 if (udata)
1694 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1695 else
1696 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001697
Amrani, Ramdf158562016-12-22 14:52:24 +02001698 if (rc)
1699 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001700
Ram Amranicecbcdd2016-10-10 13:15:34 +03001701 qp->ibqp.qp_num = qp->qp_id;
1702
Ram Amranicecbcdd2016-10-10 13:15:34 +03001703 return &qp->ibqp;
1704
Amrani, Ramdf158562016-12-22 14:52:24 +02001705err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001706 kfree(qp);
1707
1708 return ERR_PTR(-EFAULT);
1709}
1710
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001711static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001712{
1713 switch (qp_state) {
1714 case QED_ROCE_QP_STATE_RESET:
1715 return IB_QPS_RESET;
1716 case QED_ROCE_QP_STATE_INIT:
1717 return IB_QPS_INIT;
1718 case QED_ROCE_QP_STATE_RTR:
1719 return IB_QPS_RTR;
1720 case QED_ROCE_QP_STATE_RTS:
1721 return IB_QPS_RTS;
1722 case QED_ROCE_QP_STATE_SQD:
1723 return IB_QPS_SQD;
1724 case QED_ROCE_QP_STATE_ERR:
1725 return IB_QPS_ERR;
1726 case QED_ROCE_QP_STATE_SQE:
1727 return IB_QPS_SQE;
1728 }
1729 return IB_QPS_ERR;
1730}
1731
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001732static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1733 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001734{
1735 switch (qp_state) {
1736 case IB_QPS_RESET:
1737 return QED_ROCE_QP_STATE_RESET;
1738 case IB_QPS_INIT:
1739 return QED_ROCE_QP_STATE_INIT;
1740 case IB_QPS_RTR:
1741 return QED_ROCE_QP_STATE_RTR;
1742 case IB_QPS_RTS:
1743 return QED_ROCE_QP_STATE_RTS;
1744 case IB_QPS_SQD:
1745 return QED_ROCE_QP_STATE_SQD;
1746 case IB_QPS_ERR:
1747 return QED_ROCE_QP_STATE_ERR;
1748 default:
1749 return QED_ROCE_QP_STATE_ERR;
1750 }
1751}
1752
1753static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1754{
1755 qed_chain_reset(&qph->pbl);
1756 qph->prod = 0;
1757 qph->cons = 0;
1758 qph->wqe_cons = 0;
1759 qph->db_data.data.value = cpu_to_le16(0);
1760}
1761
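/* Validate a RoCE QP state transition and apply its side effects (e.g. the
 * RQ doorbell is rung on the INIT->RTR move); any transition not listed
 * below fails with -EINVAL.
 */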
1762static int qedr_update_qp_state(struct qedr_dev *dev,
1763 struct qedr_qp *qp,
1764 enum qed_roce_qp_state new_state)
1765{
1766 int status = 0;
1767
1768 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001769 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001770
1771 switch (qp->state) {
1772 case QED_ROCE_QP_STATE_RESET:
1773 switch (new_state) {
1774 case QED_ROCE_QP_STATE_INIT:
1775 qp->prev_wqe_size = 0;
1776 qedr_reset_qp_hwq_info(&qp->sq);
1777 qedr_reset_qp_hwq_info(&qp->rq);
1778 break;
1779 default:
1780 status = -EINVAL;
1781 break;
1782		}
1783 break;
1784 case QED_ROCE_QP_STATE_INIT:
1785 switch (new_state) {
1786 case QED_ROCE_QP_STATE_RTR:
1787 /* Update doorbell (in case post_recv was
1788 * done before move to RTR)
1789 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001790
1791 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1792 wmb();
1793 writel(qp->rq.db_data.raw, qp->rq.db);
1794 /* Make sure write takes effect */
1795 mmiowb();
1796 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001797 break;
1798 case QED_ROCE_QP_STATE_ERR:
1799 break;
1800 default:
1801 /* Invalid state change. */
1802 status = -EINVAL;
1803 break;
1804		}
1805 break;
1806 case QED_ROCE_QP_STATE_RTR:
1807 /* RTR->XXX */
1808 switch (new_state) {
1809 case QED_ROCE_QP_STATE_RTS:
1810 break;
1811 case QED_ROCE_QP_STATE_ERR:
1812 break;
1813 default:
1814 /* Invalid state change. */
1815 status = -EINVAL;
1816 break;
1817		}
1818 break;
1819 case QED_ROCE_QP_STATE_RTS:
1820 /* RTS->XXX */
1821 switch (new_state) {
1822 case QED_ROCE_QP_STATE_SQD:
1823 break;
1824 case QED_ROCE_QP_STATE_ERR:
1825 break;
1826 default:
1827 /* Invalid state change. */
1828 status = -EINVAL;
1829 break;
1830		}
1831 break;
1832 case QED_ROCE_QP_STATE_SQD:
1833 /* SQD->XXX */
1834 switch (new_state) {
1835 case QED_ROCE_QP_STATE_RTS:
1836 case QED_ROCE_QP_STATE_ERR:
1837 break;
1838 default:
1839 /* Invalid state change. */
1840 status = -EINVAL;
1841 break;
1842		}
1843 break;
1844 case QED_ROCE_QP_STATE_ERR:
1845 /* ERR->XXX */
1846 switch (new_state) {
1847 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001848 if ((qp->rq.prod != qp->rq.cons) ||
1849 (qp->sq.prod != qp->sq.cons)) {
1850 DP_NOTICE(dev,
1851 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1852 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1853 qp->sq.cons);
1854 status = -EINVAL;
1855 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001856 break;
1857 default:
1858 status = -EINVAL;
1859 break;
1860		}
1861 break;
1862 default:
1863 status = -EINVAL;
1864 break;
1865	}
1866
1867 return status;
1868}
1869
1870int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1871 int attr_mask, struct ib_udata *udata)
1872{
1873 struct qedr_qp *qp = get_qedr_qp(ibqp);
1874 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1875 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001876 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001877 enum ib_qp_state old_qp_state, new_qp_state;
1878 int rc = 0;
1879
1880 DP_DEBUG(dev, QEDR_MSG_QP,
1881 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1882 attr->qp_state);
1883
1884 old_qp_state = qedr_get_ibqp_state(qp->state);
1885 if (attr_mask & IB_QP_STATE)
1886 new_qp_state = attr->qp_state;
1887 else
1888 new_qp_state = old_qp_state;
1889
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001890 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1891 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1892 ibqp->qp_type, attr_mask,
1893 IB_LINK_LAYER_ETHERNET)) {
1894 DP_ERR(dev,
1895 "modify qp: invalid attribute mask=0x%x specified for\n"
1896 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1897 attr_mask, qp->qp_id, ibqp->qp_type,
1898 old_qp_state, new_qp_state);
1899 rc = -EINVAL;
1900 goto err;
1901 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001902 }
1903
1904 /* Translate the masks... */
1905 if (attr_mask & IB_QP_STATE) {
1906 SET_FIELD(qp_params.modify_flags,
1907 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1908 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1909 }
1910
1911 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1912 qp_params.sqd_async = true;
1913
1914 if (attr_mask & IB_QP_PKEY_INDEX) {
1915 SET_FIELD(qp_params.modify_flags,
1916 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1917 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1918 rc = -EINVAL;
1919 goto err;
1920 }
1921
1922 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1923 }
1924
1925 if (attr_mask & IB_QP_QKEY)
1926 qp->qkey = attr->qkey;
1927
1928 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1929 SET_FIELD(qp_params.modify_flags,
1930 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1931 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1932 IB_ACCESS_REMOTE_READ;
1933 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1934 IB_ACCESS_REMOTE_WRITE;
1935 qp_params.incoming_atomic_en = attr->qp_access_flags &
1936 IB_ACCESS_REMOTE_ATOMIC;
1937 }
1938
1939 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1940 if (attr_mask & IB_QP_PATH_MTU) {
1941 if (attr->path_mtu < IB_MTU_256 ||
1942 attr->path_mtu > IB_MTU_4096) {
1943 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1944 rc = -EINVAL;
1945 goto err;
1946 }
1947 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1948 ib_mtu_enum_to_int(iboe_get_mtu
1949 (dev->ndev->mtu)));
1950 }
1951
1952 if (!qp->mtu) {
1953 qp->mtu =
1954 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1955 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1956 }
1957
1958 SET_FIELD(qp_params.modify_flags,
1959 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1960
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001961 qp_params.traffic_class_tos = grh->traffic_class;
1962 qp_params.flow_label = grh->flow_label;
1963 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001964
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001965 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001966
1967 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1968 if (rc) {
1969 DP_ERR(dev,
1970 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001971 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001972 return rc;
1973 }
1974
1975 rc = qedr_get_dmac(dev, &attr->ah_attr,
1976 qp_params.remote_mac_addr);
1977 if (rc)
1978 return rc;
1979
1980 qp_params.use_local_mac = true;
1981 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1982
1983 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1984 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1985 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1986 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1987 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1988 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1989 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1990 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001991
1992 qp_params.mtu = qp->mtu;
1993 qp_params.lb_indication = false;
1994 }
1995
1996 if (!qp_params.mtu) {
1997 /* Stay with current MTU */
1998 if (qp->mtu)
1999 qp_params.mtu = qp->mtu;
2000 else
2001 qp_params.mtu =
2002 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2003 }
2004
2005 if (attr_mask & IB_QP_TIMEOUT) {
2006 SET_FIELD(qp_params.modify_flags,
2007 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2008
2009 qp_params.ack_timeout = attr->timeout;
2010 if (attr->timeout) {
2011 u32 temp;
2012
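			/* IB encodes the ack timeout as 4.096 usec * 2^timeout;
			 * convert it to whole milliseconds for the FW, e.g.
			 * timeout=14 gives roughly 67 msec.
			 */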
2013 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2014 /* FW requires [msec] */
2015 qp_params.ack_timeout = temp;
2016 } else {
2017 /* Infinite */
2018 qp_params.ack_timeout = 0;
2019 }
2020 }
2021 if (attr_mask & IB_QP_RETRY_CNT) {
2022 SET_FIELD(qp_params.modify_flags,
2023 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2024 qp_params.retry_cnt = attr->retry_cnt;
2025 }
2026
2027 if (attr_mask & IB_QP_RNR_RETRY) {
2028 SET_FIELD(qp_params.modify_flags,
2029 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2030 qp_params.rnr_retry_cnt = attr->rnr_retry;
2031 }
2032
2033 if (attr_mask & IB_QP_RQ_PSN) {
2034 SET_FIELD(qp_params.modify_flags,
2035 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2036 qp_params.rq_psn = attr->rq_psn;
2037 qp->rq_psn = attr->rq_psn;
2038 }
2039
2040 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2041 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2042 rc = -EINVAL;
2043 DP_ERR(dev,
2044 "unsupported max_rd_atomic=%d, supported=%d\n",
2045 attr->max_rd_atomic,
2046 dev->attr.max_qp_req_rd_atomic_resc);
2047 goto err;
2048 }
2049
2050 SET_FIELD(qp_params.modify_flags,
2051 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2052 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2053 }
2054
2055 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2056 SET_FIELD(qp_params.modify_flags,
2057 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2058 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2059 }
2060
2061 if (attr_mask & IB_QP_SQ_PSN) {
2062 SET_FIELD(qp_params.modify_flags,
2063 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2064 qp_params.sq_psn = attr->sq_psn;
2065 qp->sq_psn = attr->sq_psn;
2066 }
2067
2068 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2069 if (attr->max_dest_rd_atomic >
2070 dev->attr.max_qp_resp_rd_atomic_resc) {
2071 DP_ERR(dev,
2072 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2073 attr->max_dest_rd_atomic,
2074 dev->attr.max_qp_resp_rd_atomic_resc);
2075
2076 rc = -EINVAL;
2077 goto err;
2078 }
2079
2080 SET_FIELD(qp_params.modify_flags,
2081 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2082 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2083 }
2084
2085 if (attr_mask & IB_QP_DEST_QPN) {
2086 SET_FIELD(qp_params.modify_flags,
2087 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2088
2089 qp_params.dest_qp = attr->dest_qp_num;
2090 qp->dest_qp_num = attr->dest_qp_num;
2091 }
2092
2093 if (qp->qp_type != IB_QPT_GSI)
2094 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2095 qp->qed_qp, &qp_params);
2096
2097 if (attr_mask & IB_QP_STATE) {
2098 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002099 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002100 qp->state = qp_params.new_state;
2101 }
2102
2103err:
2104 return rc;
2105}
2106
2107static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2108{
2109 int ib_qp_acc_flags = 0;
2110
2111 if (params->incoming_rdma_write_en)
2112 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2113 if (params->incoming_rdma_read_en)
2114 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2115 if (params->incoming_atomic_en)
2116 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2117 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2118 return ib_qp_acc_flags;
2119}
2120
2121int qedr_query_qp(struct ib_qp *ibqp,
2122 struct ib_qp_attr *qp_attr,
2123 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2124{
2125 struct qed_rdma_query_qp_out_params params;
2126 struct qedr_qp *qp = get_qedr_qp(ibqp);
2127 struct qedr_dev *dev = qp->dev;
2128 int rc = 0;
2129
2130 memset(&params, 0, sizeof(params));
2131
2132 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2133 if (rc)
2134 goto err;
2135
2136 memset(qp_attr, 0, sizeof(*qp_attr));
2137 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2138
2139 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2140 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002141 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002142 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2143 qp_attr->rq_psn = params.rq_psn;
2144 qp_attr->sq_psn = params.sq_psn;
2145 qp_attr->dest_qp_num = params.dest_qp;
2146
2147 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2148
2149 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2150 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2151 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2152 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002153 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002154 qp_init_attr->cap = qp_attr->cap;
2155
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002156 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002157 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2158 params.flow_label, qp->sgid_idx,
2159 params.hop_limit_ttl, params.traffic_class_tos);
2160 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2161 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2162 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002163 qp_attr->timeout = params.timeout;
2164 qp_attr->rnr_retry = params.rnr_retry;
2165 qp_attr->retry_cnt = params.retry_cnt;
2166 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2167 qp_attr->pkey_index = params.pkey_index;
2168 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002169 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2170 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002171 qp_attr->alt_pkey_index = 0;
2172 qp_attr->alt_port_num = 0;
2173 qp_attr->alt_timeout = 0;
2174 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2175
2176 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2177 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2178 qp_attr->max_rd_atomic = params.max_rd_atomic;
2179 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2180
2181 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2182 qp_attr->cap.max_inline_data);
2183
2184err:
2185 return rc;
2186}
2187
Amrani, Ramdf158562016-12-22 14:52:24 +02002188int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2189{
2190 int rc = 0;
2191
2192 if (qp->qp_type != IB_QPT_GSI) {
2193 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2194 if (rc)
2195 return rc;
2196 }
2197
2198 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2199 qedr_cleanup_user(dev, qp);
2200 else
2201 qedr_cleanup_kernel(dev, qp);
2202
2203 return 0;
2204}
2205
Ram Amranicecbcdd2016-10-10 13:15:34 +03002206int qedr_destroy_qp(struct ib_qp *ibqp)
2207{
2208 struct qedr_qp *qp = get_qedr_qp(ibqp);
2209 struct qedr_dev *dev = qp->dev;
2210 struct ib_qp_attr attr;
2211 int attr_mask = 0;
2212 int rc = 0;
2213
2214 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2215 qp, qp->qp_type);
2216
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002217 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2218 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2219 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2220 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002221
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002222 attr.qp_state = IB_QPS_ERR;
2223 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002224
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002225 /* Change the QP state to ERROR */
2226 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2227 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002228 }
2229
Amrani, Ramdf158562016-12-22 14:52:24 +02002230 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002231 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002232
Amrani, Ramdf158562016-12-22 14:52:24 +02002233 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002234
2235 kfree(qp);
2236
2237 return rc;
2238}
Ram Amranie0290cc2016-10-10 13:15:35 +03002239
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002240struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002241 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002242{
2243 struct qedr_ah *ah;
2244
2245 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2246 if (!ah)
2247 return ERR_PTR(-ENOMEM);
2248
2249 ah->attr = *attr;
2250
2251 return &ah->ibah;
2252}
2253
2254int qedr_destroy_ah(struct ib_ah *ibah)
2255{
2256 struct qedr_ah *ah = get_qedr_ah(ibah);
2257
2258 kfree(ah);
2259 return 0;
2260}
2261
Ram Amranie0290cc2016-10-10 13:15:35 +03002262static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2263{
2264 struct qedr_pbl *pbl, *tmp;
2265
2266 if (info->pbl_table)
2267 list_add_tail(&info->pbl_table->list_entry,
2268 &info->free_pbl_list);
2269
2270 if (!list_empty(&info->inuse_pbl_list))
2271 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2272
2273 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2274 list_del(&pbl->list_entry);
2275 qedr_free_pbl(dev, &info->pbl_info, pbl);
2276 }
2277}
2278
2279static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2280 size_t page_list_len, bool two_layered)
2281{
2282 struct qedr_pbl *tmp;
2283 int rc;
2284
2285 INIT_LIST_HEAD(&info->free_pbl_list);
2286 INIT_LIST_HEAD(&info->inuse_pbl_list);
2287
2288 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2289 page_list_len, two_layered);
2290 if (rc)
2291 goto done;
2292
2293 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002294 if (IS_ERR(info->pbl_table)) {
2295 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002296 goto done;
2297 }
2298
2299 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2300 &info->pbl_table->pa);
2301
2302	/* In the usual case we use 2 PBLs, so we add one to the free
2303	 * list and allocate another one
2304 */
2305 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002306 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002307 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2308 goto done;
2309 }
2310
2311 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2312
2313 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2314
2315done:
2316 if (rc)
2317 free_mr_info(dev, info);
2318
2319 return rc;
2320}
2321
2322struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2323 u64 usr_addr, int acc, struct ib_udata *udata)
2324{
2325 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2326 struct qedr_mr *mr;
2327 struct qedr_pd *pd;
2328 int rc = -ENOMEM;
2329
2330 pd = get_qedr_pd(ibpd);
2331 DP_DEBUG(dev, QEDR_MSG_MR,
2332 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2333 pd->pd_id, start, len, usr_addr, acc);
2334
2335 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2336 return ERR_PTR(-EINVAL);
2337
2338 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2339 if (!mr)
2340 return ERR_PTR(rc);
2341
2342 mr->type = QEDR_MR_USER;
2343
2344 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2345 if (IS_ERR(mr->umem)) {
2346 rc = -EFAULT;
2347 goto err0;
2348 }
2349
2350 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2351 if (rc)
2352 goto err1;
2353
2354 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002355 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002356
2357 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2358 if (rc) {
2359 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2360 goto err1;
2361 }
2362
2363 /* Index only, 18 bit long, lkey = itid << 8 | key */
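	/* e.g. itid = 0x1234 with key 0 yields lkey 0x123400 */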
2364 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2365 mr->hw_mr.key = 0;
2366 mr->hw_mr.pd = pd->pd_id;
2367 mr->hw_mr.local_read = 1;
2368 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2369 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2370 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2371 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2372 mr->hw_mr.mw_bind = false;
2373 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2374 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2375 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002376 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002377 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2378 mr->hw_mr.length = len;
2379 mr->hw_mr.vaddr = usr_addr;
2380 mr->hw_mr.zbva = false;
2381 mr->hw_mr.phy_mr = false;
2382 mr->hw_mr.dma_mr = false;
2383
2384 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2385 if (rc) {
2386 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2387 goto err2;
2388 }
2389
2390 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2391 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2392 mr->hw_mr.remote_atomic)
2393 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2394
2395 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2396 mr->ibmr.lkey);
2397 return &mr->ibmr;
2398
2399err2:
2400 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2401err1:
2402 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2403err0:
2404 kfree(mr);
2405 return ERR_PTR(rc);
2406}
2407
2408int qedr_dereg_mr(struct ib_mr *ib_mr)
2409{
2410 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2411 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2412 int rc = 0;
2413
2414 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2415 if (rc)
2416 return rc;
2417
2418 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2419
2420 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2421 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2422
2423 /* it could be user registered memory. */
2424 if (mr->umem)
2425 ib_umem_release(mr->umem);
2426
2427 kfree(mr);
2428
2429 return rc;
2430}
2431
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002432static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2433 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002434{
2435 struct qedr_pd *pd = get_qedr_pd(ibpd);
2436 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2437 struct qedr_mr *mr;
2438 int rc = -ENOMEM;
2439
2440 DP_DEBUG(dev, QEDR_MSG_MR,
2441 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2442 max_page_list_len);
2443
2444 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2445 if (!mr)
2446 return ERR_PTR(rc);
2447
2448 mr->dev = dev;
2449 mr->type = QEDR_MR_FRMR;
2450
2451 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2452 if (rc)
2453 goto err0;
2454
2455 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2456 if (rc) {
2457 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2458 goto err0;
2459 }
2460
2461 /* Index only, 18 bit long, lkey = itid << 8 | key */
2462 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2463 mr->hw_mr.key = 0;
2464 mr->hw_mr.pd = pd->pd_id;
2465 mr->hw_mr.local_read = 1;
2466 mr->hw_mr.local_write = 0;
2467 mr->hw_mr.remote_read = 0;
2468 mr->hw_mr.remote_write = 0;
2469 mr->hw_mr.remote_atomic = 0;
2470 mr->hw_mr.mw_bind = false;
2471 mr->hw_mr.pbl_ptr = 0;
2472 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2473 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2474 mr->hw_mr.fbo = 0;
2475 mr->hw_mr.length = 0;
2476 mr->hw_mr.vaddr = 0;
2477 mr->hw_mr.zbva = false;
2478 mr->hw_mr.phy_mr = true;
2479 mr->hw_mr.dma_mr = false;
2480
2481 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2482 if (rc) {
2483 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2484 goto err1;
2485 }
2486
2487 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2488 mr->ibmr.rkey = mr->ibmr.lkey;
2489
2490 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2491 return mr;
2492
2493err1:
2494 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2495err0:
2496 kfree(mr);
2497 return ERR_PTR(rc);
2498}
2499
2500struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2501 enum ib_mr_type mr_type, u32 max_num_sg)
2502{
2503 struct qedr_dev *dev;
2504 struct qedr_mr *mr;
2505
2506 if (mr_type != IB_MR_TYPE_MEM_REG)
2507 return ERR_PTR(-EINVAL);
2508
2509 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2510
2511 if (IS_ERR(mr))
2512 return ERR_PTR(-EINVAL);
2513
2514 dev = mr->dev;
2515
2516 return &mr->ibmr;
2517}
2518
2519static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2520{
2521 struct qedr_mr *mr = get_qedr_mr(ibmr);
2522 struct qedr_pbl *pbl_table;
2523 struct regpair *pbe;
2524 u32 pbes_in_page;
2525
2526 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2527		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2528 return -ENOMEM;
2529 }
2530
2531 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2532 mr->npages, addr);
2533
2534 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2535 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2536 pbe = (struct regpair *)pbl_table->va;
2537 pbe += mr->npages % pbes_in_page;
2538 pbe->lo = cpu_to_le32((u32)addr);
2539 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2540
2541 mr->npages++;
2542
2543 return 0;
2544}
2545
2546static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2547{
2548 int work = info->completed - info->completed_handled - 1;
2549
2550 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2551 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2552 struct qedr_pbl *pbl;
2553
2554		/* Free all the page lists that can be freed (all those that
2555		 * were invalidated), under the assumption that if an FMR
2556		 * completed successfully, then any invalidate operation posted
2557		 * before it has completed as well
2558 */
2559 pbl = list_first_entry(&info->inuse_pbl_list,
2560 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002561 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002562 info->completed_handled++;
2563 }
2564}
2565
2566int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2567 int sg_nents, unsigned int *sg_offset)
2568{
2569 struct qedr_mr *mr = get_qedr_mr(ibmr);
2570
2571 mr->npages = 0;
2572
2573 handle_completed_mrs(mr->dev, &mr->info);
2574 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2575}
2576
2577struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2578{
2579 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2580 struct qedr_pd *pd = get_qedr_pd(ibpd);
2581 struct qedr_mr *mr;
2582 int rc;
2583
2584 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2585 if (!mr)
2586 return ERR_PTR(-ENOMEM);
2587
2588 mr->type = QEDR_MR_DMA;
2589
2590 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2591 if (rc) {
2592 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2593 goto err1;
2594 }
2595
2596 /* index only, 18 bit long, lkey = itid << 8 | key */
2597 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2598 mr->hw_mr.pd = pd->pd_id;
2599 mr->hw_mr.local_read = 1;
2600 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2601 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2602 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2603 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2604 mr->hw_mr.dma_mr = true;
2605
2606 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2607 if (rc) {
2608 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2609 goto err2;
2610 }
2611
2612 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2613 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2614 mr->hw_mr.remote_atomic)
2615 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2616
2617 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2618 return &mr->ibmr;
2619
2620err2:
2621 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2622err1:
2623 kfree(mr);
2624 return ERR_PTR(rc);
2625}
Ram Amraniafa0e132016-10-10 13:15:36 +03002626
2627static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2628{
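	/* The ring is full when advancing prod would collide with cons, i.e.
	 * one slot is always left unused to tell "full" from "empty".
	 */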
2629 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2630}
2631
2632static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2633{
2634 int i, len = 0;
2635
2636 for (i = 0; i < num_sge; i++)
2637 len += sg_list[i].length;
2638
2639 return len;
2640}
2641
2642static void swap_wqe_data64(u64 *p)
2643{
2644 int i;
2645
2646 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2647 *p = cpu_to_be64(cpu_to_le64(*p));
2648}
2649
2650static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2651 struct qedr_qp *qp, u8 *wqe_size,
2652 struct ib_send_wr *wr,
2653 struct ib_send_wr **bad_wr, u8 *bits,
2654 u8 bit)
2655{
2656 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2657 char *seg_prt, *wqe;
2658 int i, seg_siz;
2659
2660 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2661 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2662 *bad_wr = wr;
2663 return 0;
2664 }
2665
2666 if (!data_size)
2667 return data_size;
2668
2669 *bits |= bit;
2670
2671 seg_prt = NULL;
2672 wqe = NULL;
2673 seg_siz = 0;
2674
2675 /* Copy data inline */
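	/* The inline payload is packed straight into SQ ring elements: each
	 * time a segment is exhausted another element is produced from the
	 * SQ PBL and *wqe_size is bumped so the consumer knows how many
	 * elements this WQE spans.
	 */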
2676 for (i = 0; i < wr->num_sge; i++) {
2677 u32 len = wr->sg_list[i].length;
2678 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2679
2680 while (len > 0) {
2681 u32 cur;
2682
2683 /* New segment required */
2684 if (!seg_siz) {
2685 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2686 seg_prt = wqe;
2687 seg_siz = sizeof(struct rdma_sq_common_wqe);
2688 (*wqe_size)++;
2689 }
2690
2691 /* Calculate currently allowed length */
2692 cur = min_t(u32, len, seg_siz);
2693 memcpy(seg_prt, src, cur);
2694
2695 /* Update segment variables */
2696 seg_prt += cur;
2697 seg_siz -= cur;
2698
2699 /* Update sge variables */
2700 src += cur;
2701 len -= cur;
2702
2703 /* Swap fully-completed segments */
2704 if (!seg_siz)
2705 swap_wqe_data64((u64 *)wqe);
2706 }
2707 }
2708
2709 /* swap last not completed segment */
2710 if (seg_siz)
2711 swap_wqe_data64((u64 *)wqe);
2712
2713 return data_size;
2714}
2715
2716#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2717 do { \
2718 DMA_REGPAIR_LE(sge->addr, vaddr); \
2719 (sge)->length = cpu_to_le32(vlength); \
2720 (sge)->flags = cpu_to_le32(vflags); \
2721 } while (0)
2722
2723#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2724 do { \
2725 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2726 (hdr)->num_sges = num_sge; \
2727 } while (0)
2728
2729#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2730 do { \
2731 DMA_REGPAIR_LE(sge->addr, vaddr); \
2732 (sge)->length = cpu_to_le32(vlength); \
2733 (sge)->l_key = cpu_to_le32(vlkey); \
2734 } while (0)
2735
2736static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2737 struct ib_send_wr *wr)
2738{
2739 u32 data_size = 0;
2740 int i;
2741
2742 for (i = 0; i < wr->num_sge; i++) {
2743 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2744
2745 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2746 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2747 sge->length = cpu_to_le32(wr->sg_list[i].length);
2748 data_size += wr->sg_list[i].length;
2749 }
2750
2751 if (wqe_size)
2752 *wqe_size += wr->num_sge;
2753
2754 return data_size;
2755}
2756
2757static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2758 struct qedr_qp *qp,
2759 struct rdma_sq_rdma_wqe_1st *rwqe,
2760 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2761 struct ib_send_wr *wr,
2762 struct ib_send_wr **bad_wr)
2763{
2764 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2765 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2766
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002767 if (wr->send_flags & IB_SEND_INLINE &&
2768 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2769 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002770 u8 flags = 0;
2771
2772 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2773 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2774 bad_wr, &rwqe->flags, flags);
2775 }
2776
2777 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2778}
2779
2780static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2781 struct qedr_qp *qp,
2782 struct rdma_sq_send_wqe_1st *swqe,
2783 struct rdma_sq_send_wqe_2st *swqe2,
2784 struct ib_send_wr *wr,
2785 struct ib_send_wr **bad_wr)
2786{
2787 memset(swqe2, 0, sizeof(*swqe2));
2788 if (wr->send_flags & IB_SEND_INLINE) {
2789 u8 flags = 0;
2790
2791 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2792 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2793 bad_wr, &swqe->flags, flags);
2794 }
2795
2796 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2797}
2798
2799static int qedr_prepare_reg(struct qedr_qp *qp,
2800 struct rdma_sq_fmr_wqe_1st *fwqe1,
2801 struct ib_reg_wr *wr)
2802{
2803 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2804 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2805
2806 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2807 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2808 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2809 fwqe1->l_key = wr->key;
2810
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002811 fwqe2->access_ctrl = 0;
2812
Ram Amraniafa0e132016-10-10 13:15:36 +03002813 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2814 !!(wr->access & IB_ACCESS_REMOTE_READ));
2815 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2816 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2817 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2818 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2819 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2820 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2821 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2822 fwqe2->fmr_ctrl = 0;
2823
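	/* The page size is encoded as log2(page_size) - 12, i.e. relative to
	 * a 4K page.
	 */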
2824 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2825 ilog2(mr->ibmr.page_size) - 12);
2826
2827 fwqe2->length_hi = 0;
2828 fwqe2->length_lo = mr->ibmr.length;
2829 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2830 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2831
2832 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2833
2834 return 0;
2835}
2836
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002837static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002838{
2839 switch (opcode) {
2840 case IB_WR_RDMA_WRITE:
2841 case IB_WR_RDMA_WRITE_WITH_IMM:
2842 return IB_WC_RDMA_WRITE;
2843 case IB_WR_SEND_WITH_IMM:
2844 case IB_WR_SEND:
2845 case IB_WR_SEND_WITH_INV:
2846 return IB_WC_SEND;
2847 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002848 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002849 return IB_WC_RDMA_READ;
2850 case IB_WR_ATOMIC_CMP_AND_SWP:
2851 return IB_WC_COMP_SWAP;
2852 case IB_WR_ATOMIC_FETCH_AND_ADD:
2853 return IB_WC_FETCH_ADD;
2854 case IB_WR_REG_MR:
2855 return IB_WC_REG_MR;
2856 case IB_WR_LOCAL_INV:
2857 return IB_WC_LOCAL_INV;
2858 default:
2859 return IB_WC_SEND;
2860 }
2861}
2862
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002863static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002864{
2865 int wq_is_full, err_wr, pbl_is_full;
2866 struct qedr_dev *dev = qp->dev;
2867
2868 /* prevent SQ overflow and/or processing of a bad WR */
2869 err_wr = wr->num_sge > qp->sq.max_sges;
2870 wq_is_full = qedr_wq_is_full(&qp->sq);
2871 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2872 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2873 if (wq_is_full || err_wr || pbl_is_full) {
2874 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2875 DP_ERR(dev,
2876 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2877 qp);
2878 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2879 }
2880
2881 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2882 DP_ERR(dev,
2883 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2884 qp);
2885 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2886 }
2887
2888 if (pbl_is_full &&
2889 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2890 DP_ERR(dev,
2891 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2892 qp);
2893 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2894 }
2895 return false;
2896 }
2897 return true;
2898}
2899
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002900static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03002901 struct ib_send_wr **bad_wr)
2902{
2903 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2904 struct qedr_qp *qp = get_qedr_qp(ibqp);
2905 struct rdma_sq_atomic_wqe_1st *awqe1;
2906 struct rdma_sq_atomic_wqe_2nd *awqe2;
2907 struct rdma_sq_atomic_wqe_3rd *awqe3;
2908 struct rdma_sq_send_wqe_2st *swqe2;
2909 struct rdma_sq_local_inv_wqe *iwqe;
2910 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2911 struct rdma_sq_send_wqe_1st *swqe;
2912 struct rdma_sq_rdma_wqe_1st *rwqe;
2913 struct rdma_sq_fmr_wqe_1st *fwqe1;
2914 struct rdma_sq_common_wqe *wqe;
2915 u32 length;
2916 int rc = 0;
2917 bool comp;
2918
2919 if (!qedr_can_post_send(qp, wr)) {
2920 *bad_wr = wr;
2921 return -ENOMEM;
2922 }
2923
2924 wqe = qed_chain_produce(&qp->sq.pbl);
2925 qp->wqe_wr_id[qp->sq.prod].signaled =
2926 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2927
2928 wqe->flags = 0;
2929 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2930 !!(wr->send_flags & IB_SEND_SOLICITED));
2931 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2932 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2933 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2934 !!(wr->send_flags & IB_SEND_FENCE));
2935 wqe->prev_wqe_size = qp->prev_wqe_size;
2936
2937 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2938
2939 switch (wr->opcode) {
2940 case IB_WR_SEND_WITH_IMM:
2941 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2942 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2943 swqe->wqe_size = 2;
2944 swqe2 = qed_chain_produce(&qp->sq.pbl);
2945
2946 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2947 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2948 wr, bad_wr);
2949 swqe->length = cpu_to_le32(length);
2950 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2951 qp->prev_wqe_size = swqe->wqe_size;
2952 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2953 break;
2954 case IB_WR_SEND:
2955 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2956 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2957
2958 swqe->wqe_size = 2;
2959 swqe2 = qed_chain_produce(&qp->sq.pbl);
2960 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2961 wr, bad_wr);
2962 swqe->length = cpu_to_le32(length);
2963 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2964 qp->prev_wqe_size = swqe->wqe_size;
2965 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2966 break;
2967 case IB_WR_SEND_WITH_INV:
2968 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2969 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2970 swqe2 = qed_chain_produce(&qp->sq.pbl);
2971 swqe->wqe_size = 2;
2972 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2973 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2974 wr, bad_wr);
2975 swqe->length = cpu_to_le32(length);
2976 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2977 qp->prev_wqe_size = swqe->wqe_size;
2978 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2979 break;
2980
2981 case IB_WR_RDMA_WRITE_WITH_IMM:
2982 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2983 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2984
2985 rwqe->wqe_size = 2;
2986 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2987 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2988 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2989 wr, bad_wr);
2990 rwqe->length = cpu_to_le32(length);
2991 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2992 qp->prev_wqe_size = rwqe->wqe_size;
2993 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2994 break;
2995 case IB_WR_RDMA_WRITE:
2996 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2997 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2998
2999 rwqe->wqe_size = 2;
3000 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3001 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3002 wr, bad_wr);
3003 rwqe->length = cpu_to_le32(length);
3004 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3005 qp->prev_wqe_size = rwqe->wqe_size;
3006 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3007 break;
3008 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003009 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3010		/* fallthrough: handled identically to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003011
3012 case IB_WR_RDMA_READ:
3013 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3014 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3015
3016 rwqe->wqe_size = 2;
3017 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3018 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3019 wr, bad_wr);
3020 rwqe->length = cpu_to_le32(length);
3021 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3022 qp->prev_wqe_size = rwqe->wqe_size;
3023 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3024 break;
3025
3026 case IB_WR_ATOMIC_CMP_AND_SWP:
3027 case IB_WR_ATOMIC_FETCH_AND_ADD:
3028 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3029 awqe1->wqe_size = 4;
3030
3031 awqe2 = qed_chain_produce(&qp->sq.pbl);
3032 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3033 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3034
3035 awqe3 = qed_chain_produce(&qp->sq.pbl);
3036
3037 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3038 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3039 DMA_REGPAIR_LE(awqe3->swap_data,
3040 atomic_wr(wr)->compare_add);
3041 } else {
3042 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3043 DMA_REGPAIR_LE(awqe3->swap_data,
3044 atomic_wr(wr)->swap);
3045 DMA_REGPAIR_LE(awqe3->cmp_data,
3046 atomic_wr(wr)->compare_add);
3047 }
3048
3049 qedr_prepare_sq_sges(qp, NULL, wr);
3050
3051 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3052 qp->prev_wqe_size = awqe1->wqe_size;
3053 break;
3054
3055 case IB_WR_LOCAL_INV:
3056 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3057 iwqe->wqe_size = 1;
3058
3059 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3060 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3061 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3062 qp->prev_wqe_size = iwqe->wqe_size;
3063 break;
3064 case IB_WR_REG_MR:
3065 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3066 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3067 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3068 fwqe1->wqe_size = 2;
3069
3070 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3071 if (rc) {
3072 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3073 *bad_wr = wr;
3074 break;
3075 }
3076
3077 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3078 qp->prev_wqe_size = fwqe1->wqe_size;
3079 break;
3080 default:
3081 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3082 rc = -EINVAL;
3083 *bad_wr = wr;
3084 break;
3085 }
3086
3087 if (*bad_wr) {
3088 u16 value;
3089
3090 /* Restore prod to its position before
3091 * this WR was processed
3092 */
3093 value = le16_to_cpu(qp->sq.db_data.data.value);
3094 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3095
3096 /* Restore prev_wqe_size */
3097 qp->prev_wqe_size = wqe->prev_wqe_size;
3098 rc = -EINVAL;
3099 DP_ERR(dev, "POST SEND FAILED\n");
3100 }
3101
3102 return rc;
3103}
3104
3105int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3106 struct ib_send_wr **bad_wr)
3107{
3108 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3109 struct qedr_qp *qp = get_qedr_qp(ibqp);
3110 unsigned long flags;
3111 int rc = 0;
3112
3113 *bad_wr = NULL;
3114
Ram Amrani04886772016-10-10 13:15:38 +03003115 if (qp->qp_type == IB_QPT_GSI)
3116 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3117
Ram Amraniafa0e132016-10-10 13:15:36 +03003118 spin_lock_irqsave(&qp->q_lock, flags);
3119
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003120 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3121 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3122 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3123 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3124 spin_unlock_irqrestore(&qp->q_lock, flags);
3125 *bad_wr = wr;
3126 DP_DEBUG(dev, QEDR_MSG_CQ,
3127 "QP in wrong state! QP icid=0x%x state %d\n",
3128 qp->icid, qp->state);
3129 return -EINVAL;
3130 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003131 }
3132
Ram Amraniafa0e132016-10-10 13:15:36 +03003133 while (wr) {
3134 rc = __qedr_post_send(ibqp, wr, bad_wr);
3135 if (rc)
3136 break;
3137
3138 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3139
3140 qedr_inc_sw_prod(&qp->sq);
3141
3142 qp->sq.db_data.data.value++;
3143
3144 wr = wr->next;
3145 }
3146
3147 /* Trigger doorbell
3148 * If there was a failure in the first WR then it will be triggered in
3149	 * vain. However this is not harmful (as long as the producer value is
3150 * unchanged). For performance reasons we avoid checking for this
3151 * redundant doorbell.
3152 */
3153 wmb();
3154 writel(qp->sq.db_data.raw, qp->sq.db);
3155
3156 /* Make sure write sticks */
3157 mmiowb();
3158
3159 spin_unlock_irqrestore(&qp->q_lock, flags);
3160
3161 return rc;
3162}
3163
3164int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3165 struct ib_recv_wr **bad_wr)
3166{
3167 struct qedr_qp *qp = get_qedr_qp(ibqp);
3168 struct qedr_dev *dev = qp->dev;
3169 unsigned long flags;
3170 int status = 0;
3171
Ram Amrani04886772016-10-10 13:15:38 +03003172 if (qp->qp_type == IB_QPT_GSI)
3173 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3174
Ram Amraniafa0e132016-10-10 13:15:36 +03003175 spin_lock_irqsave(&qp->q_lock, flags);
3176
Amrani, Ram922d9a42016-12-22 14:40:38 +02003177 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003178 spin_unlock_irqrestore(&qp->q_lock, flags);
3179 *bad_wr = wr;
3180 return -EINVAL;
3181 }
3182
3183 while (wr) {
3184 int i;
3185
3186 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3187 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3188 wr->num_sge > qp->rq.max_sges) {
3189 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3190 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3191 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3192 qp->rq.max_sges);
3193 status = -ENOMEM;
3194 *bad_wr = wr;
3195 break;
3196 }
3197 for (i = 0; i < wr->num_sge; i++) {
3198 u32 flags = 0;
3199 struct rdma_rq_sge *rqe =
3200 qed_chain_produce(&qp->rq.pbl);
3201
3202 /* First one must include the number
3203 * of SGE in the list
3204 */
3205 if (!i)
3206 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3207 wr->num_sge);
3208
3209 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3210 wr->sg_list[i].lkey);
3211
3212 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3213 wr->sg_list[i].length, flags);
3214 }
3215
3216 /* Special case of no sges. FW requires between 1-4 sges...
3217		 * in this case we need to post 1 sge with length zero. This is
3218		 * because an RDMA write with immediate consumes an RQ entry.
3219 */
3220 if (!wr->num_sge) {
3221 u32 flags = 0;
3222 struct rdma_rq_sge *rqe =
3223 qed_chain_produce(&qp->rq.pbl);
3224
3225 /* First one must include the number
3226 * of SGE in the list
3227 */
3228 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3229 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3230
3231 RQ_SGE_SET(rqe, 0, 0, flags);
3232 i = 1;
3233 }
3234
3235 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3236 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3237
3238 qedr_inc_sw_prod(&qp->rq);
3239
3240 /* Flush all the writes before signalling doorbell */
3241 wmb();
3242
3243 qp->rq.db_data.data.value++;
3244
3245 writel(qp->rq.db_data.raw, qp->rq.db);
3246
3247 /* Make sure write sticks */
3248 mmiowb();
3249
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003250 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3251 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3252 mmiowb(); /* for second doorbell */
3253 }
3254
Ram Amraniafa0e132016-10-10 13:15:36 +03003255 wr = wr->next;
3256 }
3257
3258 spin_unlock_irqrestore(&qp->q_lock, flags);
3259
3260 return status;
3261}
3262
3263static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3264{
3265 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3266
3267 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3268 cq->pbl_toggle;
3269}
3270
3271static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3272{
3273 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3274 struct qedr_qp *qp;
3275
3276 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3277 resp_cqe->qp_handle.lo,
3278 u64);
3279 return qp;
3280}
3281
3282static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3283{
3284 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3285
3286 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3287}
3288
3289/* Return latest CQE (needs processing) */
3290static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3291{
3292 return cq->latest_cqe;
3293}
3294
3295/* For FMR we need to increment the completed counter used by the FMR
3296 * algorithm that determines whether a PBL can be freed or not.
3297 * This must be done whether or not the work request was signaled, so we
3298 * call this function from the condition that checks if a WR should be
3299 * skipped, to make sure we don't miss it (possibly this FMR operation
3300 * was not signaled)
3301 */
3302static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3303{
3304 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3305 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3306}
3307
3308static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3309 struct qedr_cq *cq, int num_entries,
3310 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3311 int force)
3312{
3313 u16 cnt = 0;
3314
3315 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3316 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3317 qedr_chk_if_fmr(qp);
3318 /* skip WC */
3319 goto next_cqe;
3320 }
3321
3322 /* fill WC */
3323 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003324 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003325 wc->wc_flags = 0;
3326 wc->src_qp = qp->id;
3327 wc->qp = &qp->ibqp;
3328
3329 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3330 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3331
3332 switch (wc->opcode) {
3333 case IB_WC_RDMA_WRITE:
3334 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3335 break;
3336 case IB_WC_COMP_SWAP:
3337 case IB_WC_FETCH_ADD:
3338 wc->byte_len = 8;
3339 break;
3340 case IB_WC_REG_MR:
3341 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3342 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003343 case IB_WC_RDMA_READ:
3344 case IB_WC_SEND:
3345 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3346 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003347 default:
3348 break;
3349 }
3350
3351 num_entries--;
3352 wc++;
3353 cnt++;
3354next_cqe:
3355 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3356 qed_chain_consume(&qp->sq.pbl);
3357 qedr_inc_sw_cons(&qp->sq);
3358 }
3359
3360 return cnt;
3361}
3362
3363static int qedr_poll_cq_req(struct qedr_dev *dev,
3364 struct qedr_qp *qp, struct qedr_cq *cq,
3365 int num_entries, struct ib_wc *wc,
3366 struct rdma_cqe_requester *req)
3367{
3368 int cnt = 0;
3369
3370 switch (req->status) {
3371 case RDMA_CQE_REQ_STS_OK:
3372 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3373 IB_WC_SUCCESS, 0);
3374 break;
3375 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003376 if (qp->state != QED_ROCE_QP_STATE_ERR)
3377 DP_ERR(dev,
3378 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3379 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003380 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003381 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003382 break;
3383 default:
3384		/* process all WQEs before the consumer */
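		/* WQEs posted before the failed one completed successfully;
		 * the WQE at req->sq_cons gets the real error status below.
		 */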
3385 qp->state = QED_ROCE_QP_STATE_ERR;
3386 cnt = process_req(dev, qp, cq, num_entries, wc,
3387 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3388 wc += cnt;
3389 /* if we have extra WC fill it with actual error info */
3390 if (cnt < num_entries) {
3391 enum ib_wc_status wc_status;
3392
3393 switch (req->status) {
3394 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3395 DP_ERR(dev,
3396 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3397 cq->icid, qp->icid);
3398 wc_status = IB_WC_BAD_RESP_ERR;
3399 break;
3400 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3401 DP_ERR(dev,
3402 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3403 cq->icid, qp->icid);
3404 wc_status = IB_WC_LOC_LEN_ERR;
3405 break;
3406 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3407 DP_ERR(dev,
3408 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3409 cq->icid, qp->icid);
3410 wc_status = IB_WC_LOC_QP_OP_ERR;
3411 break;
3412 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3413 DP_ERR(dev,
3414 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3415 cq->icid, qp->icid);
3416 wc_status = IB_WC_LOC_PROT_ERR;
3417 break;
3418 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3419 DP_ERR(dev,
3420 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3421 cq->icid, qp->icid);
3422 wc_status = IB_WC_MW_BIND_ERR;
3423 break;
3424 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3425 DP_ERR(dev,
3426 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3427 cq->icid, qp->icid);
3428 wc_status = IB_WC_REM_INV_REQ_ERR;
3429 break;
3430 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3431 DP_ERR(dev,
3432 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3433 cq->icid, qp->icid);
3434 wc_status = IB_WC_REM_ACCESS_ERR;
3435 break;
3436 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3437 DP_ERR(dev,
3438 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3439 cq->icid, qp->icid);
3440 wc_status = IB_WC_REM_OP_ERR;
3441 break;
3442 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3443 DP_ERR(dev,
3444 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3445 cq->icid, qp->icid);
3446 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3447 break;
3448 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3449 DP_ERR(dev,
3450			       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3451 cq->icid, qp->icid);
3452 wc_status = IB_WC_RETRY_EXC_ERR;
3453 break;
3454 default:
3455 DP_ERR(dev,
3456 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3457 cq->icid, qp->icid);
3458 wc_status = IB_WC_GENERAL_ERR;
3459 }
3460 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3461 wc_status, 1);
3462 }
3463 }
3464
3465 return cnt;
3466}
3467
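/* Map a responder CQE status reported by the FW to an ib_wc_status code. */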
Amrani, Ramb6acd712017-04-27 13:35:35 +03003468static inline int qedr_cqe_resp_status_to_ib(u8 status)
3469{
3470 switch (status) {
3471 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3472 return IB_WC_LOC_ACCESS_ERR;
3473 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3474 return IB_WC_LOC_LEN_ERR;
3475 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3476 return IB_WC_LOC_QP_OP_ERR;
3477 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3478 return IB_WC_LOC_PROT_ERR;
3479 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3480 return IB_WC_MW_BIND_ERR;
3481 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3482 return IB_WC_REM_INV_RD_REQ_ERR;
3483 case RDMA_CQE_RESP_STS_OK:
3484 return IB_WC_SUCCESS;
3485 default:
3486 return IB_WC_GENERAL_ERR;
3487 }
3488}
3489
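/*
 * Fill a work completion for a successful responder CQE, decoding the
 * immediate-data / invalidate / RDMA flags.  Returns -EINVAL when the
 * flag combination reported by the FW is not a legal one.
 */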
3490static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3491 struct ib_wc *wc)
3492{
3493 wc->status = IB_WC_SUCCESS;
3494 wc->byte_len = le32_to_cpu(resp->length);
3495
3496 if (resp->flags & QEDR_RESP_IMM) {
3497 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3498 wc->wc_flags |= IB_WC_WITH_IMM;
3499
3500 if (resp->flags & QEDR_RESP_RDMA)
3501 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3502
3503 if (resp->flags & QEDR_RESP_INV)
3504 return -EINVAL;
3505
3506 } else if (resp->flags & QEDR_RESP_INV) {
3507 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3508 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3509
3510 if (resp->flags & QEDR_RESP_RDMA)
3511 return -EINVAL;
3512
3513 } else if (resp->flags & QEDR_RESP_RDMA) {
3514 return -EINVAL;
3515 }
3516
3517 return 0;
3518}
3519
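/*
 * Common WC fill for a responder CQE: receive opcode and flags, then
 * either the success fields via qedr_set_ok_cqe_resp_wc() or an error
 * status mapped with qedr_cqe_resp_status_to_ib(), plus the QP info and
 * the caller-supplied wr_id.
 */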
Ram Amraniafa0e132016-10-10 13:15:36 +03003520static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3521 struct qedr_cq *cq, struct ib_wc *wc,
3522 struct rdma_cqe_responder *resp, u64 wr_id)
3523{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003524 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003525 wc->opcode = IB_WC_RECV;
3526 wc->wc_flags = 0;
3527
Amrani, Ramb6acd712017-04-27 13:35:35 +03003528 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3529 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3530 DP_ERR(dev,
3531 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3532 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003533
Amrani, Ramb6acd712017-04-27 13:35:35 +03003534 } else {
3535 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3536 if (wc->status == IB_WC_GENERAL_ERR)
3537 DP_ERR(dev,
3538 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3539 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003540 }
3541
Amrani, Ramb6acd712017-04-27 13:35:35 +03003542 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003543 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003544 wc->src_qp = qp->id;
3545 wc->qp = &qp->ibqp;
3546 wc->wr_id = wr_id;
3547}
3548
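/*
 * Complete the RQ WQE at the current SW consumer: fill one WC, consume
 * its PBL elements and advance the RQ consumer.  Always returns 1.
 */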
3549static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3550 struct qedr_cq *cq, struct ib_wc *wc,
3551 struct rdma_cqe_responder *resp)
3552{
3553 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3554
3555 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3556
3557 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3558 qed_chain_consume(&qp->rq.pbl);
3559 qedr_inc_sw_cons(&qp->rq);
3560
3561 return 1;
3562}
3563
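/*
 * Flush outstanding RQ WQEs up to the HW consumer reported in the CQE,
 * generating an IB_WC_WR_FLUSH_ERR completion for each one (bounded by
 * num_entries).  Returns the number of WCs written.
 */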
3564static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3565 int num_entries, struct ib_wc *wc, u16 hw_cons)
3566{
3567 u16 cnt = 0;
3568
3569 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3570 /* fill WC */
3571 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003572 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003573 wc->wc_flags = 0;
3574 wc->src_qp = qp->id;
3575 wc->byte_len = 0;
3576 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3577 wc->qp = &qp->ibqp;
3578 num_entries--;
3579 wc++;
3580 cnt++;
3581 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3582 qed_chain_consume(&qp->rq.pbl);
3583 qedr_inc_sw_cons(&qp->rq);
3584 }
3585
3586 return cnt;
3587}
3588
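/*
 * Consume the responder CQE only once the SW RQ consumer has caught up
 * with the consumer index reported by the CQE.
 */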
3589static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3590 struct rdma_cqe_responder *resp, int *update)
3591{
3592 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3593 consume_cqe(cq);
3594 *update |= 1;
3595 }
3596}
3597
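/*
 * Handle one responder CQE: flushed WRs are drained through
 * process_resp_flush(), everything else completes through
 * process_resp_one() and the CQE is consumed immediately.
 */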
3598static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3599 struct qedr_cq *cq, int num_entries,
3600 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3601 int *update)
3602{
3603 int cnt;
3604
3605 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3606 cnt = process_resp_flush(qp, cq, num_entries, wc,
3607					 le16_to_cpu(resp->rq_cons));
3608 try_consume_resp_cqe(cq, qp, resp, update);
3609 } else {
3610 cnt = process_resp_one(dev, qp, cq, wc, resp);
3611 consume_cqe(cq);
3612 *update |= 1;
3613 }
3614
3615 return cnt;
3616}
3617
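/*
 * Requester-side counterpart of try_consume_resp_cqe(): consume the CQE
 * once the SQ SW consumer matches the CQE's sq_cons.
 */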
3618static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3619 struct rdma_cqe_requester *req, int *update)
3620{
3621 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3622 consume_cqe(cq);
3623 *update |= 1;
3624 }
3625}
3626
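/*
 * qedr_poll_cq() - poll a qedr CQ for work completions.
 *
 * Walks valid CQEs from the current consumer index, dispatching each to
 * the requester or responder handler, then rings the consumer doorbell
 * once if any CQE was consumed.  GSI CQs are delegated to
 * qedr_gsi_poll_cq().  Reached through ib_poll_cq(); a minimal caller
 * sketch (handle_wc() is a hypothetical consumer routine):
 *
 *	struct ib_wc wc[8];
 *	int i, n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 *
 *	for (i = 0; i < n; i++)
 *		handle_wc(&wc[i]);
 */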
3627int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3628{
3629 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3630 struct qedr_cq *cq = get_qedr_cq(ibcq);
3631 union rdma_cqe *cqe = cq->latest_cqe;
3632 u32 old_cons, new_cons;
3633 unsigned long flags;
3634 int update = 0;
3635 int done = 0;
3636
Amrani, Ram4dd72632017-04-27 13:35:34 +03003637 if (cq->destroyed) {
3638 DP_ERR(dev,
3639 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3640 cq, cq->icid);
3641 return 0;
3642 }
3643
Ram Amrani04886772016-10-10 13:15:38 +03003644 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3645 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3646
Ram Amraniafa0e132016-10-10 13:15:36 +03003647 spin_lock_irqsave(&cq->cq_lock, flags);
3648 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3649 while (num_entries && is_valid_cqe(cq, cqe)) {
3650 struct qedr_qp *qp;
3651 int cnt = 0;
3652
3653 /* prevent speculative reads of any field of CQE */
3654 rmb();
3655
3656 qp = cqe_get_qp(cqe);
3657 if (!qp) {
3658 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3659 break;
3660 }
3661
3662 wc->qp = &qp->ibqp;
3663
3664 switch (cqe_get_type(cqe)) {
3665 case RDMA_CQE_TYPE_REQUESTER:
3666 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3667 &cqe->req);
3668 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3669 break;
3670 case RDMA_CQE_TYPE_RESPONDER_RQ:
3671 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3672 &cqe->resp, &update);
3673 break;
3674 case RDMA_CQE_TYPE_INVALID:
3675 default:
3676 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3677 cqe_get_type(cqe));
3678 }
3679 num_entries -= cnt;
3680 wc += cnt;
3681 done += cnt;
3682
3683 cqe = get_cqe(cq);
3684 }
3685 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3686
3687 cq->cq_cons += new_cons - old_cons;
3688
3689 if (update)
3690		/* doorbell notifies about the latest VALID entry,
3691		 * but the chain already points to the next INVALID one
3692 */
3693 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3694
3695 spin_unlock_irqrestore(&cq->cq_lock, flags);
3696 return done;
3697}
Ram Amrani993d1b52016-10-10 13:15:39 +03003698
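/*
 * No MAD processing is done here; the MAD header is only logged at
 * debug level and IB_MAD_RESULT_SUCCESS is returned.
 */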
3699int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3700 u8 port_num,
3701 const struct ib_wc *in_wc,
3702 const struct ib_grh *in_grh,
3703 const struct ib_mad_hdr *mad_hdr,
3704 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3705 size_t *out_mad_size, u16 *out_mad_pkey_index)
3706{
3707 struct qedr_dev *dev = get_qedr_dev(ibdev);
3708
3709 DP_DEBUG(dev, QEDR_MSG_GSI,
3710 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3711 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3712 mad_hdr->class_specific, mad_hdr->class_version,
3713 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3714 return IB_MAD_RESULT_SUCCESS;
3715}