Ram Amraniac1b36e2016-10-10 13:15:32 +03001/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
Mintz, Yuvalbe086e72017-03-11 18:39:18 +020046#include <linux/qed/common_hsi.h>
47#include "qedr_hsi_rdma.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030048#include <linux/qed/qed_if.h>
49#include "qedr.h"
50#include "verbs.h"
51#include <rdma/qedr-abi.h>
Kalderon, Michal99d195c2017-07-26 14:41:51 +030052#include "qedr_roce_cm.h"
Ram Amraniac1b36e2016-10-10 13:15:32 +030053
Ram Amrania7efd772016-10-10 13:15:33 +030054#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55
Amrani, Ramc75d3ec2017-06-26 19:05:04 +030056static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
Ram Amrania7efd772016-10-10 13:15:33 +030064int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65{
66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
67 return -EINVAL;
68
69 *pkey = QEDR_ROCE_PKEY_DEFAULT;
70 return 0;
71}
72
Kalderon, Michale6a38c52017-07-26 14:41:52 +030073int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
74 int index, union ib_gid *sgid)
75{
76 struct qedr_dev *dev = get_qedr_dev(ibdev);
77
78 memset(sgid->raw, 0, sizeof(sgid->raw));
79 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
80
81 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
82 sgid->global.interface_id, sgid->global.subnet_prefix);
83
84 return 0;
85}
86
Ram Amraniac1b36e2016-10-10 13:15:32 +030087int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
88 union ib_gid *sgid)
89{
90 struct qedr_dev *dev = get_qedr_dev(ibdev);
91 int rc = 0;
92
93 if (!rdma_cap_roce_gid_table(ibdev, port))
94 return -ENODEV;
95
96 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
97 if (rc == -EAGAIN) {
98 memcpy(sgid, &zgid, sizeof(*sgid));
99 return 0;
100 }
101
102 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
103 sgid->global.interface_id, sgid->global.subnet_prefix);
104
105 return rc;
106}
107
108int qedr_add_gid(struct ib_device *device, u8 port_num,
109 unsigned int index, const union ib_gid *gid,
110 const struct ib_gid_attr *attr, void **context)
111{
112 if (!rdma_cap_roce_gid_table(device, port_num))
113 return -EINVAL;
114
115 if (port_num > QEDR_MAX_PORT)
116 return -EINVAL;
117
118 if (!context)
119 return -EINVAL;
120
121 return 0;
122}
123
124int qedr_del_gid(struct ib_device *device, u8 port_num,
125 unsigned int index, void **context)
126{
127 if (!rdma_cap_roce_gid_table(device, port_num))
128 return -EINVAL;
129
130 if (port_num > QEDR_MAX_PORT)
131 return -EINVAL;
132
133 if (!context)
134 return -EINVAL;
135
136 return 0;
137}
138
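/* Report the device capabilities cached in dev->attr to the IB core as a
 * generic ib_device_attr.
 */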
139int qedr_query_device(struct ib_device *ibdev,
140 struct ib_device_attr *attr, struct ib_udata *udata)
141{
142 struct qedr_dev *dev = get_qedr_dev(ibdev);
143 struct qedr_device_attr *qattr = &dev->attr;
144
145 if (!dev->rdma_ctx) {
146 DP_ERR(dev,
147 "qedr_query_device called with invalid params rdma_ctx=%p\n",
148 dev->rdma_ctx);
149 return -EINVAL;
150 }
151
152 memset(attr, 0, sizeof(*attr));
153
154 attr->fw_ver = qattr->fw_ver;
155 attr->sys_image_guid = qattr->sys_image_guid;
156 attr->max_mr_size = qattr->max_mr_size;
157 attr->page_size_cap = qattr->page_size_caps;
158 attr->vendor_id = qattr->vendor_id;
159 attr->vendor_part_id = qattr->vendor_part_id;
160 attr->hw_ver = qattr->hw_ver;
161 attr->max_qp = qattr->max_qp;
162 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
163 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
164 IB_DEVICE_RC_RNR_NAK_GEN |
165 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
166
167 attr->max_sge = qattr->max_sge;
168 attr->max_sge_rd = qattr->max_sge;
169 attr->max_cq = qattr->max_cq;
170 attr->max_cqe = qattr->max_cqe;
171 attr->max_mr = qattr->max_mr;
172 attr->max_mw = qattr->max_mw;
173 attr->max_pd = qattr->max_pd;
174 attr->atomic_cap = dev->atomic_cap;
175 attr->max_fmr = qattr->max_fmr;
176 attr->max_map_per_fmr = 16;
177 attr->max_qp_init_rd_atom =
178 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
179 attr->max_qp_rd_atom =
180 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
181 attr->max_qp_init_rd_atom);
182
183 attr->max_srq = qattr->max_srq;
184 attr->max_srq_sge = qattr->max_srq_sge;
185 attr->max_srq_wr = qattr->max_srq_wr;
186
187 attr->local_ca_ack_delay = qattr->dev_ack_delay;
188 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
189 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
190 attr->max_ah = qattr->max_ah;
191
192 return 0;
193}
194
195#define QEDR_SPEED_SDR (1)
196#define QEDR_SPEED_DDR (2)
197#define QEDR_SPEED_QDR (4)
198#define QEDR_SPEED_FDR10 (8)
199#define QEDR_SPEED_FDR (16)
200#define QEDR_SPEED_EDR (32)
201
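/* Map an Ethernet link speed (in Mbps) to the closest IB speed/width pair
 * reported via ib_port_attr.
 */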
202static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
203 u8 *ib_width)
204{
205 switch (speed) {
206 case 1000:
207 *ib_speed = QEDR_SPEED_SDR;
208 *ib_width = IB_WIDTH_1X;
209 break;
210 case 10000:
211 *ib_speed = QEDR_SPEED_QDR;
212 *ib_width = IB_WIDTH_1X;
213 break;
214
215 case 20000:
216 *ib_speed = QEDR_SPEED_DDR;
217 *ib_width = IB_WIDTH_4X;
218 break;
219
220 case 25000:
221 *ib_speed = QEDR_SPEED_EDR;
222 *ib_width = IB_WIDTH_1X;
223 break;
224
225 case 40000:
226 *ib_speed = QEDR_SPEED_QDR;
227 *ib_width = IB_WIDTH_4X;
228 break;
229
230 case 50000:
231 *ib_speed = QEDR_SPEED_QDR;
232 *ib_width = IB_WIDTH_4X;
233 break;
234
235 case 100000:
236 *ib_speed = QEDR_SPEED_EDR;
237 *ib_width = IB_WIDTH_4X;
238 break;
239
240 default:
241 /* Unsupported */
242 *ib_speed = QEDR_SPEED_SDR;
243 *ib_width = IB_WIDTH_1X;
244 }
245}
246
247int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
248{
249 struct qedr_dev *dev;
250 struct qed_rdma_port *rdma_port;
251
252 dev = get_qedr_dev(ibdev);
253 if (port > 1) {
254 DP_ERR(dev, "invalid_port=0x%x\n", port);
255 return -EINVAL;
256 }
257
258 if (!dev->rdma_ctx) {
259 DP_ERR(dev, "rdma_ctx is NULL\n");
260 return -EINVAL;
261 }
262
263 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
Ram Amraniac1b36e2016-10-10 13:15:32 +0300264
Or Gerlitzc4550c62017-01-24 13:02:39 +0200265 /* *attr is zeroed by the caller; avoid zeroing it here */
Ram Amraniac1b36e2016-10-10 13:15:32 +0300266 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
267 attr->state = IB_PORT_ACTIVE;
268 attr->phys_state = 5;
269 } else {
270 attr->state = IB_PORT_DOWN;
271 attr->phys_state = 3;
272 }
273 attr->max_mtu = IB_MTU_4096;
274 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
275 attr->lid = 0;
276 attr->lmc = 0;
277 attr->sm_lid = 0;
278 attr->sm_sl = 0;
279 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
Kalderon, Michalf5b1b172017-07-26 14:41:53 +0300280 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
281 attr->gid_tbl_len = 1;
282 attr->pkey_tbl_len = 1;
283 } else {
284 attr->gid_tbl_len = QEDR_MAX_SGID;
285 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
286 }
Ram Amraniac1b36e2016-10-10 13:15:32 +0300287 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
288 attr->qkey_viol_cntr = 0;
289 get_link_speed_and_width(rdma_port->link_speed,
290 &attr->active_speed, &attr->active_width);
291 attr->max_msg_sz = rdma_port->max_msg_size;
292 attr->max_vl_num = 4;
293
294 return 0;
295}
296
297int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
298 struct ib_port_modify *props)
299{
300 struct qedr_dev *dev;
301
302 dev = get_qedr_dev(ibdev);
303 if (port > 1) {
304 DP_ERR(dev, "invalid_port=0x%x\n", port);
305 return -EINVAL;
306 }
307
308 return 0;
309}
310
311static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
312 unsigned long len)
313{
314 struct qedr_mm *mm;
315
316 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
317 if (!mm)
318 return -ENOMEM;
319
320 mm->key.phy_addr = phy_addr;
321 /* This function might be called with a length which is not a multiple
322 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
323 * forces this granularity by increasing the requested size if needed.
324 * When qedr_mmap is called, it will search the list with the updated
325 * length as a key. To prevent search failures, the length is rounded up
326 * in advance to PAGE_SIZE.
327 */
328 mm->key.len = roundup(len, PAGE_SIZE);
329 INIT_LIST_HEAD(&mm->entry);
330
331 mutex_lock(&uctx->mm_list_lock);
332 list_add(&mm->entry, &uctx->mm_head);
333 mutex_unlock(&uctx->mm_list_lock);
334
335 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
336 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
337 (unsigned long long)mm->key.phy_addr,
338 (unsigned long)mm->key.len, uctx);
339
340 return 0;
341}
342
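/* Return true if the given address/length range was previously registered
 * for this user context via qedr_add_mmap().
 */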
343static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
344 unsigned long len)
345{
346 bool found = false;
347 struct qedr_mm *mm;
348
349 mutex_lock(&uctx->mm_list_lock);
350 list_for_each_entry(mm, &uctx->mm_head, entry) {
351 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
352 continue;
353
354 found = true;
355 break;
356 }
357 mutex_unlock(&uctx->mm_list_lock);
358 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
359 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
360 mm->key.phy_addr, mm->key.len, uctx, found);
361
362 return found;
363}
364
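/* Allocate a user context and a dedicated DPI (doorbell page). The DPI
 * physical address and size are returned to user space so it can map the
 * doorbell bar through qedr_mmap().
 */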
365struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
366 struct ib_udata *udata)
367{
368 int rc;
369 struct qedr_ucontext *ctx;
370 struct qedr_alloc_ucontext_resp uresp;
371 struct qedr_dev *dev = get_qedr_dev(ibdev);
372 struct qed_rdma_add_user_out_params oparams;
373
374 if (!udata)
375 return ERR_PTR(-EFAULT);
376
377 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
378 if (!ctx)
379 return ERR_PTR(-ENOMEM);
380
381 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
382 if (rc) {
383 DP_ERR(dev,
384 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
385 rc);
386 goto err;
387 }
388
389 ctx->dpi = oparams.dpi;
390 ctx->dpi_addr = oparams.dpi_addr;
391 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
392 ctx->dpi_size = oparams.dpi_size;
393 INIT_LIST_HEAD(&ctx->mm_head);
394 mutex_init(&ctx->mm_list_lock);
395
396 memset(&uresp, 0, sizeof(uresp));
397
Amrani, Ramad84dad2017-06-26 19:05:05 +0300398 uresp.dpm_enabled = dev->user_dpm_enabled;
Amrani, Ram67cbe352017-06-26 19:05:06 +0300399 uresp.wids_enabled = 1;
400 uresp.wid_count = oparams.wid_count;
Ram Amraniac1b36e2016-10-10 13:15:32 +0300401 uresp.db_pa = ctx->dpi_phys_addr;
402 uresp.db_size = ctx->dpi_size;
403 uresp.max_send_wr = dev->attr.max_sqe;
404 uresp.max_recv_wr = dev->attr.max_rqe;
405 uresp.max_srq_wr = dev->attr.max_srq_wr;
406 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
407 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
408 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
409 uresp.max_cqes = QEDR_MAX_CQES;
410
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300411 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amraniac1b36e2016-10-10 13:15:32 +0300412 if (rc)
413 goto err;
414
415 ctx->dev = dev;
416
417 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
418 if (rc)
419 goto err;
420
421 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
422 &ctx->ibucontext);
423 return &ctx->ibucontext;
424
425err:
426 kfree(ctx);
427 return ERR_PTR(rc);
428}
429
430int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
431{
432 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
433 struct qedr_mm *mm, *tmp;
434 int status = 0;
435
436 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
437 uctx);
438 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
439
440 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
441 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
442 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
443 mm->key.phy_addr, mm->key.len, uctx);
444 list_del(&mm->entry);
445 kfree(mm);
446 }
447
448 kfree(uctx);
449 return status;
450}
451
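/* mmap handler for user space: doorbell bar pages are mapped write-only and
 * write-combined via io_remap_pfn_range(); anything else that was registered
 * with qedr_add_mmap() is mapped as regular memory.
 */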
452int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
453{
454 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
455 struct qedr_dev *dev = get_qedr_dev(context->device);
456 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
457 u64 unmapped_db = dev->db_phys_addr;
458 unsigned long len = (vma->vm_end - vma->vm_start);
459 int rc = 0;
460 bool found;
461
462 DP_DEBUG(dev, QEDR_MSG_INIT,
463 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
464 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
465 if (vma->vm_start & (PAGE_SIZE - 1)) {
466 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
467 vma->vm_start);
468 return -EINVAL;
469 }
470
471 found = qedr_search_mmap(ucontext, vm_page, len);
472 if (!found) {
473 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
474 vma->vm_pgoff);
475 return -EINVAL;
476 }
477
478 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
479
480 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
481 dev->db_size))) {
482 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
483 if (vma->vm_flags & VM_READ) {
484 DP_ERR(dev, "Trying to map doorbell bar for read\n");
485 return -EPERM;
486 }
487
488 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
489
490 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
491 PAGE_SIZE, vma->vm_page_prot);
492 } else {
493 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
494 rc = remap_pfn_range(vma, vma->vm_start,
495 vma->vm_pgoff, len, vma->vm_page_prot);
496 }
497 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
498 return rc;
499}
Ram Amrania7efd772016-10-10 13:15:33 +0300500
501struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
502 struct ib_ucontext *context, struct ib_udata *udata)
503{
504 struct qedr_dev *dev = get_qedr_dev(ibdev);
Ram Amrania7efd772016-10-10 13:15:33 +0300505 struct qedr_pd *pd;
506 u16 pd_id;
507 int rc;
508
509 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
510 (udata && context) ? "User Lib" : "Kernel");
511
512 if (!dev->rdma_ctx) {
Colin Ian King847cb1a2017-08-24 09:25:53 +0100513 DP_ERR(dev, "invalid RDMA context\n");
Ram Amrania7efd772016-10-10 13:15:33 +0300514 return ERR_PTR(-EINVAL);
515 }
516
517 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
518 if (!pd)
519 return ERR_PTR(-ENOMEM);
520
Ram Amrani9c1e0222017-01-24 13:51:42 +0200521 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
522 if (rc)
523 goto err;
Ram Amrania7efd772016-10-10 13:15:33 +0300524
Ram Amrania7efd772016-10-10 13:15:33 +0300525 pd->pd_id = pd_id;
526
527 if (udata && context) {
Ram Amrani9c1e0222017-01-24 13:51:42 +0200528 struct qedr_alloc_pd_uresp uresp;
529
530 uresp.pd_id = pd_id;
531
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300532 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrani9c1e0222017-01-24 13:51:42 +0200533 if (rc) {
Ram Amrania7efd772016-10-10 13:15:33 +0300534 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
Ram Amrani9c1e0222017-01-24 13:51:42 +0200535 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
536 goto err;
537 }
538
539 pd->uctx = get_qedr_ucontext(context);
540 pd->uctx->pd = pd;
Ram Amrania7efd772016-10-10 13:15:33 +0300541 }
542
543 return &pd->ibpd;
Ram Amrani9c1e0222017-01-24 13:51:42 +0200544
545err:
546 kfree(pd);
547 return ERR_PTR(rc);
Ram Amrania7efd772016-10-10 13:15:33 +0300548}
549
550int qedr_dealloc_pd(struct ib_pd *ibpd)
551{
552 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
553 struct qedr_pd *pd = get_qedr_pd(ibpd);
554
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100555 if (!pd) {
Ram Amrania7efd772016-10-10 13:15:33 +0300556 pr_err("Invalid PD received in dealloc_pd\n");
Colin Ian Kingea7ef2a2016-10-18 19:39:28 +0100557 return -EINVAL;
558 }
Ram Amrania7efd772016-10-10 13:15:33 +0300559
560 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
561 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
562
563 kfree(pd);
564
565 return 0;
566}
567
568static void qedr_free_pbl(struct qedr_dev *dev,
569 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
570{
571 struct pci_dev *pdev = dev->pdev;
572 int i;
573
574 for (i = 0; i < pbl_info->num_pbls; i++) {
575 if (!pbl[i].va)
576 continue;
577 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
578 pbl[i].va, pbl[i].pa);
579 }
580
581 kfree(pbl);
582}
583
584#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
585#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
586
587#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
588#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
589#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
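/* With 8-byte PBEs, a 64K PBL page holds 8192 entries, so a single-layer PBL
 * covers up to 8192 pages and a two-layer PBL up to 8192 * 8192 pages.
 */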
590
591static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
592 struct qedr_pbl_info *pbl_info,
593 gfp_t flags)
594{
595 struct pci_dev *pdev = dev->pdev;
596 struct qedr_pbl *pbl_table;
597 dma_addr_t *pbl_main_tbl;
598 dma_addr_t pa;
599 void *va;
600 int i;
601
602 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
603 if (!pbl_table)
604 return ERR_PTR(-ENOMEM);
605
606 for (i = 0; i < pbl_info->num_pbls; i++) {
Himanshu Jha7bced912017-12-31 18:01:03 +0530607 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
608 &pa, flags);
Ram Amrania7efd772016-10-10 13:15:33 +0300609 if (!va)
610 goto err;
611
Ram Amrania7efd772016-10-10 13:15:33 +0300612 pbl_table[i].va = va;
613 pbl_table[i].pa = pa;
614 }
615
616 /* Two-layer PBLs: if we have more than one pbl we need to initialize
617 * the first one with physical pointers to all of the rest
618 */
619 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
620 for (i = 0; i < pbl_info->num_pbls - 1; i++)
621 pbl_main_tbl[i] = pbl_table[i + 1].pa;
622
623 return pbl_table;
624
625err:
626 for (i--; i >= 0; i--)
627 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
628 pbl_table[i].va, pbl_table[i].pa);
629
630 qedr_free_pbl(dev, pbl_info, pbl_table);
631
632 return ERR_PTR(-ENOMEM);
633}
634
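/* Decide between a one- and two-layer PBL for num_pbes pages and compute the
 * number and size of the PBL pages accordingly.
 */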
635static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
636 struct qedr_pbl_info *pbl_info,
637 u32 num_pbes, int two_layer_capable)
638{
639 u32 pbl_capacity;
640 u32 pbl_size;
641 u32 num_pbls;
642
643 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
644 if (num_pbes > MAX_PBES_TWO_LAYER) {
645 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
646 num_pbes);
647 return -EINVAL;
648 }
649
650 /* calculate required pbl page size */
651 pbl_size = MIN_FW_PBL_PAGE_SIZE;
652 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
653 NUM_PBES_ON_PAGE(pbl_size);
654
655 while (pbl_capacity < num_pbes) {
656 pbl_size *= 2;
657 pbl_capacity = pbl_size / sizeof(u64);
658 pbl_capacity = pbl_capacity * pbl_capacity;
659 }
660
661 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
662 num_pbls++; /* One for the layer0 ( points to the pbls) */
663 pbl_info->two_layered = true;
664 } else {
665 /* One layered PBL */
666 num_pbls = 1;
667 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
668 roundup_pow_of_two((num_pbes * sizeof(u64))));
669 pbl_info->two_layered = false;
670 }
671
672 pbl_info->num_pbls = num_pbls;
673 pbl_info->pbl_size = pbl_size;
674 pbl_info->num_pbes = num_pbes;
675
676 DP_DEBUG(dev, QEDR_MSG_MR,
677 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
678 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
679
680 return 0;
681}
682
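/* Walk the umem scatter-gather list and write the address of every
 * firmware-sized (pg_shift) page into the PBL, moving on to the next PBL
 * page whenever the current one fills up.
 */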
683static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
684 struct qedr_pbl *pbl,
Ram Amranie57bb6b2017-06-05 16:32:27 +0300685 struct qedr_pbl_info *pbl_info, u32 pg_shift)
Ram Amrania7efd772016-10-10 13:15:33 +0300686{
687 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300688 u32 fw_pg_cnt, fw_pg_per_umem_pg;
Ram Amrania7efd772016-10-10 13:15:33 +0300689 struct qedr_pbl *pbl_tbl;
690 struct scatterlist *sg;
691 struct regpair *pbe;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300692 u64 pg_addr;
Ram Amrania7efd772016-10-10 13:15:33 +0300693 int entry;
Ram Amrania7efd772016-10-10 13:15:33 +0300694
695 if (!pbl_info->num_pbes)
696 return;
697
698 /* If we have a two layered pbl, the first pbl points to the rest
699 * of the pbls and the first entry lies in the second pbl in the table
700 */
701 if (pbl_info->two_layered)
702 pbl_tbl = &pbl[1];
703 else
704 pbl_tbl = pbl;
705
706 pbe = (struct regpair *)pbl_tbl->va;
707 if (!pbe) {
708 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
709 return;
710 }
711
712 pbe_cnt = 0;
713
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +0300714 shift = umem->page_shift;
Ram Amrania7efd772016-10-10 13:15:33 +0300715
Ram Amranie57bb6b2017-06-05 16:32:27 +0300716 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
717
Ram Amrania7efd772016-10-10 13:15:33 +0300718 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
719 pages = sg_dma_len(sg) >> shift;
Ram Amranie57bb6b2017-06-05 16:32:27 +0300720 pg_addr = sg_dma_address(sg);
Ram Amrania7efd772016-10-10 13:15:33 +0300721 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
Ram Amranie57bb6b2017-06-05 16:32:27 +0300722 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
723 pbe->lo = cpu_to_le32(pg_addr);
724 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
Ram Amrania7efd772016-10-10 13:15:33 +0300725
Ram Amranie57bb6b2017-06-05 16:32:27 +0300726 pg_addr += BIT(pg_shift);
727 pbe_cnt++;
728 total_num_pbes++;
729 pbe++;
Ram Amrania7efd772016-10-10 13:15:33 +0300730
Ram Amranie57bb6b2017-06-05 16:32:27 +0300731 if (total_num_pbes == pbl_info->num_pbes)
732 return;
733
734 /* If the given pbl is full of pbes, move
735 * to the next pbl.
736 */
737 if (pbe_cnt ==
738 (pbl_info->pbl_size / sizeof(u64))) {
739 pbl_tbl++;
740 pbe = (struct regpair *)pbl_tbl->va;
741 pbe_cnt = 0;
742 }
743
744 fw_pg_cnt++;
Ram Amrania7efd772016-10-10 13:15:33 +0300745 }
746 }
747 }
748}
749
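/* Return the CQ doorbell offset and icid to user space. */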
750static int qedr_copy_cq_uresp(struct qedr_dev *dev,
751 struct qedr_cq *cq, struct ib_udata *udata)
752{
753 struct qedr_create_cq_uresp uresp;
754 int rc;
755
756 memset(&uresp, 0, sizeof(uresp));
757
758 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
759 uresp.icid = cq->icid;
760
Amrani, Ramc75d3ec2017-06-26 19:05:04 +0300761 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amrania7efd772016-10-10 13:15:33 +0300762 if (rc)
763 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
764
765 return rc;
766}
767
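/* Advance the CQ to the next CQE, flipping the expected toggle bit when the
 * chain wraps past its last element.
 */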
768static void consume_cqe(struct qedr_cq *cq)
769{
770 if (cq->latest_cqe == cq->toggle_cqe)
771 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
772
773 cq->latest_cqe = qed_chain_consume(&cq->pbl);
774}
775
776static inline int qedr_align_cq_entries(int entries)
777{
778 u64 size, aligned_size;
779
780 /* We allocate an extra entry that we don't report to the FW. */
781 size = (entries + 1) * QEDR_CQE_SIZE;
782 aligned_size = ALIGN(size, PAGE_SIZE);
783
784 return aligned_size / QEDR_CQE_SIZE;
785}
786
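/* Pin a user-space queue buffer with ib_umem_get() and prepare its PBL.
 * When alloc_and_init is set (RoCE) the PBL pages are allocated and
 * populated here; otherwise (iWARP) only the table shell is allocated,
 * since qed supplies the PBL memory at QP creation time.
 */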
787static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
788 struct qedr_dev *dev,
789 struct qedr_userq *q,
790 u64 buf_addr, size_t buf_len,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300791 int access, int dmasync,
792 int alloc_and_init)
Ram Amrania7efd772016-10-10 13:15:33 +0300793{
Ram Amranie57bb6b2017-06-05 16:32:27 +0300794 u32 fw_pages;
Ram Amrania7efd772016-10-10 13:15:33 +0300795 int rc;
796
797 q->buf_addr = buf_addr;
798 q->buf_len = buf_len;
799 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
800 if (IS_ERR(q->umem)) {
801 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
802 PTR_ERR(q->umem));
803 return PTR_ERR(q->umem);
804 }
805
Ram Amranie57bb6b2017-06-05 16:32:27 +0300806 fw_pages = ib_umem_page_count(q->umem) <<
807 (q->umem->page_shift - FW_PAGE_SHIFT);
808
809 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
Ram Amrania7efd772016-10-10 13:15:33 +0300810 if (rc)
811 goto err0;
812
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300813 if (alloc_and_init) {
814 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
815 if (IS_ERR(q->pbl_tbl)) {
816 rc = PTR_ERR(q->pbl_tbl);
817 goto err0;
818 }
Ram Amranie57bb6b2017-06-05 16:32:27 +0300819 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
820 FW_PAGE_SHIFT);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300821 } else {
822 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
Dan Carpenter89fd2572017-08-25 11:18:39 +0300823 if (!q->pbl_tbl) {
824 rc = -ENOMEM;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300825 goto err0;
Dan Carpenter89fd2572017-08-25 11:18:39 +0300826 }
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300827 }
Ram Amrania7efd772016-10-10 13:15:33 +0300828
829 return 0;
830
831err0:
832 ib_umem_release(q->umem);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300833 q->umem = NULL;
Ram Amrania7efd772016-10-10 13:15:33 +0300834
835 return rc;
836}
837
838static inline void qedr_init_cq_params(struct qedr_cq *cq,
839 struct qedr_ucontext *ctx,
840 struct qedr_dev *dev, int vector,
841 int chain_entries, int page_cnt,
842 u64 pbl_ptr,
843 struct qed_rdma_create_cq_in_params
844 *params)
845{
846 memset(params, 0, sizeof(*params));
847 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
848 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
849 params->cnq_id = vector;
850 params->cq_size = chain_entries - 1;
851 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
852 params->pbl_num_pages = page_cnt;
853 params->pbl_ptr = pbl_ptr;
854 params->pbl_two_level = 0;
855}
856
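/* Write the consumer index and arm flags to the CQ doorbell. */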
857static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
858{
859 /* Flush data before signalling doorbell */
860 wmb();
861 cq->db.data.agg_flags = flags;
862 cq->db.data.value = cpu_to_le32(cons);
863 writeq(cq->db.raw, cq->db_addr);
864
865 /* Make sure write would stick */
866 mmiowb();
867}
868
869int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
870{
871 struct qedr_cq *cq = get_qedr_cq(ibcq);
872 unsigned long sflags;
Amrani, Ram4dd72632017-04-27 13:35:34 +0300873 struct qedr_dev *dev;
874
875 dev = get_qedr_dev(ibcq->device);
876
877 if (cq->destroyed) {
878 DP_ERR(dev,
879 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
880 cq, cq->icid);
881 return -EINVAL;
882 }
883
Ram Amrania7efd772016-10-10 13:15:33 +0300884
885 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
886 return 0;
887
888 spin_lock_irqsave(&cq->cq_lock, sflags);
889
890 cq->arm_flags = 0;
891
892 if (flags & IB_CQ_SOLICITED)
893 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
894
895 if (flags & IB_CQ_NEXT_COMP)
896 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
897
898 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
899
900 spin_unlock_irqrestore(&cq->cq_lock, sflags);
901
902 return 0;
903}
904
905struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
906 const struct ib_cq_init_attr *attr,
907 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
908{
909 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
910 struct qed_rdma_destroy_cq_out_params destroy_oparams;
911 struct qed_rdma_destroy_cq_in_params destroy_iparams;
912 struct qedr_dev *dev = get_qedr_dev(ibdev);
913 struct qed_rdma_create_cq_in_params params;
914 struct qedr_create_cq_ureq ureq;
915 int vector = attr->comp_vector;
916 int entries = attr->cqe;
917 struct qedr_cq *cq;
918 int chain_entries;
919 int page_cnt;
920 u64 pbl_ptr;
921 u16 icid;
922 int rc;
923
924 DP_DEBUG(dev, QEDR_MSG_INIT,
925 "create_cq: called from %s. entries=%d, vector=%d\n",
926 udata ? "User Lib" : "Kernel", entries, vector);
927
928 if (entries > QEDR_MAX_CQES) {
929 DP_ERR(dev,
930 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
931 entries, QEDR_MAX_CQES);
932 return ERR_PTR(-EINVAL);
933 }
934
935 chain_entries = qedr_align_cq_entries(entries);
936 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
937
938 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
939 if (!cq)
940 return ERR_PTR(-ENOMEM);
941
942 if (udata) {
943 memset(&ureq, 0, sizeof(ureq));
944 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
945 DP_ERR(dev,
946 "create cq: problem copying data from user space\n");
947 goto err0;
948 }
949
950 if (!ureq.len) {
951 DP_ERR(dev,
952 "create cq: cannot create a cq with 0 entries\n");
953 goto err0;
954 }
955
956 cq->cq_type = QEDR_CQ_TYPE_USER;
957
958 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +0300959 ureq.len, IB_ACCESS_LOCAL_WRITE,
960 1, 1);
Ram Amrania7efd772016-10-10 13:15:33 +0300961 if (rc)
962 goto err0;
963
964 pbl_ptr = cq->q.pbl_tbl->pa;
965 page_cnt = cq->q.pbl_info.num_pbes;
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200966
967 cq->ibcq.cqe = chain_entries;
Ram Amrania7efd772016-10-10 13:15:33 +0300968 } else {
969 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
970
971 rc = dev->ops->common->chain_alloc(dev->cdev,
972 QED_CHAIN_USE_TO_CONSUME,
973 QED_CHAIN_MODE_PBL,
974 QED_CHAIN_CNT_TYPE_U32,
975 chain_entries,
976 sizeof(union rdma_cqe),
Mintz, Yuval1a4a6972017-06-20 16:00:00 +0300977 &cq->pbl, NULL);
Ram Amrania7efd772016-10-10 13:15:33 +0300978 if (rc)
979 goto err1;
980
981 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
982 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
Amrani, Ramc7eb3bc2016-12-22 14:40:33 +0200983 cq->ibcq.cqe = cq->pbl.capacity;
Ram Amrania7efd772016-10-10 13:15:33 +0300984 }
985
986 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
987 pbl_ptr, &params);
988
989 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
990 if (rc)
991 goto err2;
992
993 cq->icid = icid;
994 cq->sig = QEDR_CQ_MAGIC_NUMBER;
995 spin_lock_init(&cq->cq_lock);
996
997 if (ib_ctx) {
998 rc = qedr_copy_cq_uresp(dev, cq, udata);
999 if (rc)
1000 goto err3;
1001 } else {
1002 /* Generate doorbell address. */
1003 cq->db_addr = dev->db_addr +
1004 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1005 cq->db.data.icid = cq->icid;
1006 cq->db.data.params = DB_AGG_CMD_SET <<
1007 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1008
1009 /* point to the very last element; once we pass it we toggle */
1010 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1011 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1012 cq->latest_cqe = NULL;
1013 consume_cqe(cq);
1014 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1015 }
1016
1017 DP_DEBUG(dev, QEDR_MSG_CQ,
1018 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1019 cq->icid, cq, params.cq_size);
1020
1021 return &cq->ibcq;
1022
1023err3:
1024 destroy_iparams.icid = cq->icid;
1025 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1026 &destroy_oparams);
1027err2:
1028 if (udata)
1029 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1030 else
1031 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1032err1:
1033 if (udata)
1034 ib_umem_release(cq->q.umem);
1035err0:
1036 kfree(cq);
1037 return ERR_PTR(-EINVAL);
1038}
1039
1040int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1041{
1042 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1043 struct qedr_cq *cq = get_qedr_cq(ibcq);
1044
1045 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1046
1047 return 0;
1048}
1049
Amrani, Ram4dd72632017-04-27 13:35:34 +03001050#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1051#define QEDR_DESTROY_CQ_ITER_DURATION (10)
1052
Ram Amrania7efd772016-10-10 13:15:33 +03001053int qedr_destroy_cq(struct ib_cq *ibcq)
1054{
1055 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1056 struct qed_rdma_destroy_cq_out_params oparams;
1057 struct qed_rdma_destroy_cq_in_params iparams;
1058 struct qedr_cq *cq = get_qedr_cq(ibcq);
Amrani, Ram4dd72632017-04-27 13:35:34 +03001059 int iter;
Amrani, Ram942b3b22017-04-27 13:35:33 +03001060 int rc;
Ram Amrania7efd772016-10-10 13:15:33 +03001061
Amrani, Ram942b3b22017-04-27 13:35:33 +03001062 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
Ram Amrania7efd772016-10-10 13:15:33 +03001063
Amrani, Ram4dd72632017-04-27 13:35:34 +03001064 cq->destroyed = 1;
1065
Ram Amrania7efd772016-10-10 13:15:33 +03001066 /* GSIs CQs are handled by driver, so they don't exist in the FW */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001067 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1068 goto done;
Amrani, Rama1211352016-12-22 14:40:34 +02001069
Amrani, Ram942b3b22017-04-27 13:35:33 +03001070 iparams.icid = cq->icid;
1071 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1072 if (rc)
1073 return rc;
1074
1075 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
Ram Amrania7efd772016-10-10 13:15:33 +03001076
1077 if (ibcq->uobject && ibcq->uobject->context) {
1078 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1079 ib_umem_release(cq->q.umem);
1080 }
1081
Amrani, Ram4dd72632017-04-27 13:35:34 +03001082 /* We don't want the IRQ handler to handle a non-existing CQ so we
1083 * wait until all CNQ interrupts, if any, are received. This will always
1084 * happen and will always happen very fast. If not, then a serious error
1085 * has occurred. That is why we can use a long delay.
1086 * We spin for a short time so we don't lose time on context switching
1087 * in case all the completions are handled in that span. Otherwise
1088 * we sleep for a while and check again. Since the CNQ may be
1089 * associated with (only) the current CPU we use msleep to allow the
1090 * current CPU to be freed.
1091 * The CNQ notification is increased in qedr_irq_handler().
1092 */
1093 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1094 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1095 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1096 iter--;
1097 }
1098
1099 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1100 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1101 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1102 iter--;
1103 }
1104
1105 if (oparams.num_cq_notif != cq->cnq_notif)
1106 goto err;
1107
1108 /* Note that we don't need to have explicit code to wait for the
1109 * completion of the event handler because it is invoked from the EQ.
1110 * Since the destroy CQ ramrod has also been received on the EQ we can
1111 * be certain that there's no event handler in process.
1112 */
Amrani, Ram942b3b22017-04-27 13:35:33 +03001113done:
Amrani, Ram4dd72632017-04-27 13:35:34 +03001114 cq->sig = ~cq->sig;
1115
Ram Amrania7efd772016-10-10 13:15:33 +03001116 kfree(cq);
1117
1118 return 0;
Amrani, Ram4dd72632017-04-27 13:35:34 +03001119
1120err:
1121 DP_ERR(dev,
1122 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1123 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1124
1125 return -EINVAL;
Ram Amrania7efd772016-10-10 13:15:33 +03001126}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001127
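/* Resolve the GID referenced by the AH attribute into the source/destination
 * addresses, VLAN id and RoCE mode (v1, v2/IPv4 or v2/IPv6) that qed expects
 * in its modify_qp parameters.
 */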
1128static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1129 struct ib_qp_attr *attr,
1130 int attr_mask,
1131 struct qed_rdma_modify_qp_in_params
1132 *qp_params)
1133{
1134 enum rdma_network_type nw_type;
1135 struct ib_gid_attr gid_attr;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001136 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001137 union ib_gid gid;
1138 u32 ipv4_addr;
1139 int rc = 0;
1140 int i;
1141
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001142 rc = ib_get_cached_gid(ibqp->device,
1143 rdma_ah_get_port_num(&attr->ah_attr),
1144 grh->sgid_index, &gid, &gid_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001145 if (rc)
1146 return rc;
1147
1148 if (!memcmp(&gid, &zgid, sizeof(gid)))
1149 return -ENOENT;
1150
1151 if (gid_attr.ndev) {
1152 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1153
1154 dev_put(gid_attr.ndev);
1155 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1156 switch (nw_type) {
1157 case RDMA_NETWORK_IPV6:
1158 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1159 sizeof(qp_params->sgid));
1160 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001161 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001162 sizeof(qp_params->dgid));
1163 qp_params->roce_mode = ROCE_V2_IPV6;
1164 SET_FIELD(qp_params->modify_flags,
1165 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1166 break;
1167 case RDMA_NETWORK_IB:
1168 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1169 sizeof(qp_params->sgid));
1170 memcpy(&qp_params->dgid.bytes[0],
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001171 &grh->dgid,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001172 sizeof(qp_params->dgid));
1173 qp_params->roce_mode = ROCE_V1;
1174 break;
1175 case RDMA_NETWORK_IPV4:
1176 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1177 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1178 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1179 qp_params->sgid.ipv4_addr = ipv4_addr;
1180 ipv4_addr =
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001181 qedr_get_ipv4_from_gid(grh->dgid.raw);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001182 qp_params->dgid.ipv4_addr = ipv4_addr;
1183 SET_FIELD(qp_params->modify_flags,
1184 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1185 qp_params->roce_mode = ROCE_V2_IPV4;
1186 break;
1187 }
1188 }
1189
1190 for (i = 0; i < 4; i++) {
1191 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1192 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1193 }
1194
1195 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1196 qp_params->vlan_id = 0;
1197
1198 return 0;
1199}
1200
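/* Validate the requested QP type and capabilities against the device limits. */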
Ram Amranicecbcdd2016-10-10 13:15:34 +03001201static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1202 struct ib_qp_init_attr *attrs)
1203{
1204 struct qedr_device_attr *qattr = &dev->attr;
1205
1206 /* QP0... attrs->qp_type == IB_QPT_GSI */
1207 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1208 DP_DEBUG(dev, QEDR_MSG_QP,
1209 "create qp: unsupported qp type=0x%x requested\n",
1210 attrs->qp_type);
1211 return -EINVAL;
1212 }
1213
1214 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1215 DP_ERR(dev,
1216 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1217 attrs->cap.max_send_wr, qattr->max_sqe);
1218 return -EINVAL;
1219 }
1220
1221 if (attrs->cap.max_inline_data > qattr->max_inline) {
1222 DP_ERR(dev,
1223 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1224 attrs->cap.max_inline_data, qattr->max_inline);
1225 return -EINVAL;
1226 }
1227
1228 if (attrs->cap.max_send_sge > qattr->max_sge) {
1229 DP_ERR(dev,
1230 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1231 attrs->cap.max_send_sge, qattr->max_sge);
1232 return -EINVAL;
1233 }
1234
1235 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1236 DP_ERR(dev,
1237 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1238 attrs->cap.max_recv_sge, qattr->max_sge);
1239 return -EINVAL;
1240 }
1241
1242 /* Unprivileged user space cannot create special QP */
1243 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1244 DP_ERR(dev,
1245 "create qp: userspace can't create special QPs of type=0x%x\n",
1246 attrs->qp_type);
1247 return -EINVAL;
1248 }
1249
1250 return 0;
1251}
1252
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001253static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1254 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001255 struct qedr_qp *qp)
1256{
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001257 /* iWARP requires two doorbells per RQ. */
1258 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1259 uresp->rq_db_offset =
1260 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1261 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1262 } else {
1263 uresp->rq_db_offset =
1264 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1265 }
1266
Ram Amranicecbcdd2016-10-10 13:15:34 +03001267 uresp->rq_icid = qp->icid;
1268}
1269
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001270static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1271 struct qedr_create_qp_uresp *uresp,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001272 struct qedr_qp *qp)
1273{
1274 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001275
1276 /* iWARP uses the same cid for rq and sq */
1277 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1278 uresp->sq_icid = qp->icid;
1279 else
1280 uresp->sq_icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001281}
1282
1283static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1284 struct qedr_qp *qp, struct ib_udata *udata)
1285{
1286 struct qedr_create_qp_uresp uresp;
1287 int rc;
1288
1289 memset(&uresp, 0, sizeof(uresp));
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001290 qedr_copy_sq_uresp(dev, &uresp, qp);
1291 qedr_copy_rq_uresp(dev, &uresp, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001292
1293 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1294 uresp.qp_id = qp->qp_id;
1295
Amrani, Ramc75d3ec2017-06-26 19:05:04 +03001296 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Ram Amranicecbcdd2016-10-10 13:15:34 +03001297 if (rc)
1298 DP_ERR(dev,
1299 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1300 qp->icid);
1301
1302 return rc;
1303}
1304
Amrani, Ramdf158562016-12-22 14:52:24 +02001305static void qedr_set_common_qp_params(struct qedr_dev *dev,
1306 struct qedr_qp *qp,
1307 struct qedr_pd *pd,
1308 struct ib_qp_init_attr *attrs)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001309{
Ram Amranicecbcdd2016-10-10 13:15:34 +03001310 spin_lock_init(&qp->q_lock);
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001311 atomic_set(&qp->refcnt, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001312 qp->pd = pd;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001313 qp->qp_type = attrs->qp_type;
1314 qp->max_inline_data = attrs->cap.max_inline_data;
1315 qp->sq.max_sges = attrs->cap.max_send_sge;
1316 qp->state = QED_ROCE_QP_STATE_RESET;
1317 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1318 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1319 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1320 qp->dev = dev;
Amrani, Ramdf158562016-12-22 14:52:24 +02001321 qp->rq.max_sges = attrs->cap.max_recv_sge;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001322
1323 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001324 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1325 qp->rq.max_sges, qp->rq_cq->icid);
1326 DP_DEBUG(dev, QEDR_MSG_QP,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001327 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1328 pd->pd_id, qp->qp_type, qp->max_inline_data,
1329 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1330 DP_DEBUG(dev, QEDR_MSG_QP,
1331 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1332 qp->sq.max_sges, qp->sq_cq->icid);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001333}
1334
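/* RoCE doorbell layout: the SQ uses icid + 1 and the RQ uses icid. */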
Amrani, Ramdf158562016-12-22 14:52:24 +02001335static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001336{
1337 qp->sq.db = dev->db_addr +
1338 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1339 qp->sq.db_data.data.icid = qp->icid + 1;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001340 qp->rq.db = dev->db_addr +
1341 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1342 qp->rq.db_data.data.icid = qp->icid;
1343}
1344
Amrani, Ramdf158562016-12-22 14:52:24 +02001345static inline void
1346qedr_init_common_qp_in_params(struct qedr_dev *dev,
1347 struct qedr_pd *pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001348 struct qedr_qp *qp,
1349 struct ib_qp_init_attr *attrs,
Amrani, Ramdf158562016-12-22 14:52:24 +02001350 bool fmr_and_reserved_lkey,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001351 struct qed_rdma_create_qp_in_params *params)
1352{
Amrani, Ramdf158562016-12-22 14:52:24 +02001353 /* QP handle to be written in an async event */
1354 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1355 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001356
Amrani, Ramdf158562016-12-22 14:52:24 +02001357 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1358 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1359 params->pd = pd->pd_id;
1360 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1361 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1362 params->stats_queue = 0;
1363 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1364 params->srq_id = 0;
1365 params->use_srq = false;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001366}
1367
Amrani, Ramdf158562016-12-22 14:52:24 +02001368static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001369{
Amrani, Ramdf158562016-12-22 14:52:24 +02001370 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1371 "qp=%p. "
1372 "sq_addr=0x%llx, "
1373 "sq_len=%zd, "
1374 "rq_addr=0x%llx, "
1375 "rq_len=%zd"
1376 "\n",
1377 qp,
1378 qp->usq.buf_addr,
1379 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1380}
1381
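/* iWARP only: track the QP in an IDR keyed by its id so it can be looked up
 * later; a no-op for RoCE.
 */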
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001382static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
1383{
1384 int rc;
1385
1386 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1387 return 0;
1388
1389 idr_preload(GFP_KERNEL);
1390 spin_lock_irq(&dev->idr_lock);
1391
1392 rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
1393
1394 spin_unlock_irq(&dev->idr_lock);
1395 idr_preload_end();
1396
1397 return rc < 0 ? rc : 0;
1398}
1399
1400static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
1401{
1402 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1403 return;
1404
1405 spin_lock_irq(&dev->idr_lock);
1406 idr_remove(&dev->qpidr, id);
1407 spin_unlock_irq(&dev->idr_lock);
1408}
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001409
1410static inline void
1411qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1412 struct qedr_qp *qp,
1413 struct qed_rdma_create_qp_out_params *out_params)
1414{
1415 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1416 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1417
1418 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1419 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1420
1421 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1422 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1423
1424 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1425 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1426}
1427
Amrani, Ramdf158562016-12-22 14:52:24 +02001428static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1429{
1430 if (qp->usq.umem)
1431 ib_umem_release(qp->usq.umem);
1432 qp->usq.umem = NULL;
1433
1434 if (qp->urq.umem)
1435 ib_umem_release(qp->urq.umem);
1436 qp->urq.umem = NULL;
1437}
1438
1439static int qedr_create_user_qp(struct qedr_dev *dev,
1440 struct qedr_qp *qp,
1441 struct ib_pd *ibpd,
1442 struct ib_udata *udata,
1443 struct ib_qp_init_attr *attrs)
1444{
1445 struct qed_rdma_create_qp_in_params in_params;
1446 struct qed_rdma_create_qp_out_params out_params;
1447 struct qedr_pd *pd = get_qedr_pd(ibpd);
1448 struct ib_ucontext *ib_ctx = NULL;
Amrani, Ramdf158562016-12-22 14:52:24 +02001449 struct qedr_create_qp_ureq ureq;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001450 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001451 int rc = -EINVAL;
1452
1453 ib_ctx = ibpd->uobject->context;
Amrani, Ramdf158562016-12-22 14:52:24 +02001454
1455 memset(&ureq, 0, sizeof(ureq));
1456 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1457 if (rc) {
1458 DP_ERR(dev, "Problem copying data from user space\n");
1459 return rc;
1460 }
1461
1462 /* SQ - read access only (0), dma sync not required (0) */
1463 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001464 ureq.sq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001465 if (rc)
1466 return rc;
1467
1468 /* RQ - read access only (0), dma sync not required (0) */
1469 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001470 ureq.rq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001471 if (rc)
1472 return rc;
1473
1474 memset(&in_params, 0, sizeof(in_params));
1475 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1476 in_params.qp_handle_lo = ureq.qp_handle_lo;
1477 in_params.qp_handle_hi = ureq.qp_handle_hi;
1478 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1479 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1480 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1481 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1482
1483 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1484 &in_params, &out_params);
1485
1486 if (!qp->qed_qp) {
1487 rc = -ENOMEM;
1488 goto err1;
1489 }
1490
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001491 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1492 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1493
Amrani, Ramdf158562016-12-22 14:52:24 +02001494 qp->qp_id = out_params.qp_id;
1495 qp->icid = out_params.icid;
1496
1497 rc = qedr_copy_qp_uresp(dev, qp, udata);
1498 if (rc)
1499 goto err;
1500
1501 qedr_qp_user_print(dev, qp);
1502
1503 return 0;
1504err:
1505 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1506 if (rc)
1507 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1508
1509err1:
1510 qedr_cleanup_user(dev, qp);
1511 return rc;
1512}
1513
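/* iWARP doorbell layout: SQ and RQ share the icid, and the RQ needs a second
 * doorbell (iwarp_db2) for the post-RQ command flags.
 */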
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001514static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1515{
1516 qp->sq.db = dev->db_addr +
1517 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1518 qp->sq.db_data.data.icid = qp->icid;
1519
1520 qp->rq.db = dev->db_addr +
1521 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1522 qp->rq.db_data.data.icid = qp->icid;
1523 qp->rq.iwarp_db2 = dev->db_addr +
1524 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1525 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1526 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1527}
1528
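/* RoCE kernel QP: allocate the SQ and RQ chains first and hand their PBL
 * addresses to qed when creating the QP.
 */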
Amrani, Ramdf158562016-12-22 14:52:24 +02001529static int
1530qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1531 struct qedr_qp *qp,
1532 struct qed_rdma_create_qp_in_params *in_params,
1533 u32 n_sq_elems, u32 n_rq_elems)
1534{
1535 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001536 int rc;
1537
Ram Amranicecbcdd2016-10-10 13:15:34 +03001538 rc = dev->ops->common->chain_alloc(dev->cdev,
1539 QED_CHAIN_USE_TO_PRODUCE,
1540 QED_CHAIN_MODE_PBL,
1541 QED_CHAIN_CNT_TYPE_U32,
1542 n_sq_elems,
1543 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001544 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001545
1546 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001547 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001548
Amrani, Ramdf158562016-12-22 14:52:24 +02001549 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1550 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001551
Ram Amranicecbcdd2016-10-10 13:15:34 +03001552 rc = dev->ops->common->chain_alloc(dev->cdev,
1553 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1554 QED_CHAIN_MODE_PBL,
1555 QED_CHAIN_CNT_TYPE_U32,
1556 n_rq_elems,
1557 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001558 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001559 if (rc)
1560 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001561
Amrani, Ramdf158562016-12-22 14:52:24 +02001562 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1563 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001564
Amrani, Ramdf158562016-12-22 14:52:24 +02001565 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1566 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001567
Amrani, Ramdf158562016-12-22 14:52:24 +02001568 if (!qp->qed_qp)
1569 return -EINVAL;
1570
1571 qp->qp_id = out_params.qp_id;
1572 qp->icid = out_params.icid;
1573
1574 qedr_set_roce_db_info(dev, qp);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001575 return rc;
1576}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001577
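/* iWARP kernel QP: qed allocates the PBL pages itself, so create the QP
 * first and then build the SQ/RQ chains on top of the returned PBLs via
 * ext_pbl.
 */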
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001578static int
1579qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1580 struct qedr_qp *qp,
1581 struct qed_rdma_create_qp_in_params *in_params,
1582 u32 n_sq_elems, u32 n_rq_elems)
1583{
1584 struct qed_rdma_create_qp_out_params out_params;
1585 struct qed_chain_ext_pbl ext_pbl;
1586 int rc;
1587
1588 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1589 QEDR_SQE_ELEMENT_SIZE,
1590 QED_CHAIN_MODE_PBL);
1591 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1592 QEDR_RQE_ELEMENT_SIZE,
1593 QED_CHAIN_MODE_PBL);
1594
1595 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1596 in_params, &out_params);
1597
1598 if (!qp->qed_qp)
1599 return -EINVAL;
1600
1601 /* Now we allocate the chain */
1602 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1603 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1604
1605 rc = dev->ops->common->chain_alloc(dev->cdev,
1606 QED_CHAIN_USE_TO_PRODUCE,
1607 QED_CHAIN_MODE_PBL,
1608 QED_CHAIN_CNT_TYPE_U32,
1609 n_sq_elems,
1610 QEDR_SQE_ELEMENT_SIZE,
1611 &qp->sq.pbl, &ext_pbl);
1612
1613 if (rc)
1614 goto err;
1615
1616 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1617 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1618
1619 rc = dev->ops->common->chain_alloc(dev->cdev,
1620 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1621 QED_CHAIN_MODE_PBL,
1622 QED_CHAIN_CNT_TYPE_U32,
1623 n_rq_elems,
1624 QEDR_RQE_ELEMENT_SIZE,
1625 &qp->rq.pbl, &ext_pbl);
1626
1627 if (rc)
1628 goto err;
1629
1630 qp->qp_id = out_params.qp_id;
1631 qp->icid = out_params.icid;
1632
1633 qedr_set_iwarp_db_info(dev, qp);
1634 return rc;
1635
1636err:
1637 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1638
1639 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001640}
1641
Amrani, Ramdf158562016-12-22 14:52:24 +02001642static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001643{
Amrani, Ramdf158562016-12-22 14:52:24 +02001644 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1645 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001646
Amrani, Ramdf158562016-12-22 14:52:24 +02001647 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1648 kfree(qp->rqe_wr_id);
1649}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001650
Amrani, Ramdf158562016-12-22 14:52:24 +02001651static int qedr_create_kernel_qp(struct qedr_dev *dev,
1652 struct qedr_qp *qp,
1653 struct ib_pd *ibpd,
1654 struct ib_qp_init_attr *attrs)
1655{
1656 struct qed_rdma_create_qp_in_params in_params;
1657 struct qedr_pd *pd = get_qedr_pd(ibpd);
1658 int rc = -EINVAL;
1659 u32 n_rq_elems;
1660 u32 n_sq_elems;
1661 u32 n_sq_entries;
1662
1663 memset(&in_params, 0, sizeof(in_params));
1664
1665 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1666 * the ring. The ring should allow at least a single WR, even if the
1667 * user requested none, due to allocation issues.
1668 * We should add an extra WR since the prod and cons indices of
1669 * wqe_wr_id are managed in such a way that the WQ is considered full
1670 * when (prod+1)%max_wr==cons. We currently don't do that because we
1671 * double the number of entries due to an iSER issue that pushes far more
1672 * WRs than indicated. If we decline its ib_post_send() then we get
1673 * error prints in dmesg that we'd like to avoid.
1674 */
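/* For example (illustrative numbers): with max_wr == 4 and cons == 0,
 * (prod + 1) % max_wr == cons already holds at prod == 3, so only
 * max_wr - 1 WRs fit before the queue reads as full, which is why the
 * comment above talks about adding an extra WR.
 */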
1675 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1676 dev->attr.max_sqe);
1677
1678 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1679 GFP_KERNEL);
1680 if (!qp->wqe_wr_id) {
1681 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1682 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001683 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001684
Amrani, Ramdf158562016-12-22 14:52:24 +02001685 /* QP handle to be written in CQE */
1686 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1687 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001688
Amrani, Ramdf158562016-12-22 14:52:24 +02001689 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1690 * the ring. The ring should allow at least a single WR, even if the
1691 * user requested none, due to allocation issues.
1692 */
1693 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1694
1695 /* Allocate driver internal RQ array */
1696 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1697 GFP_KERNEL);
1698 if (!qp->rqe_wr_id) {
1699 DP_ERR(dev,
1700 "create qp: failed RQ shadow memory allocation\n");
1701 kfree(qp->wqe_wr_id);
1702 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001703 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001704
Amrani, Ramdf158562016-12-22 14:52:24 +02001705 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001706
Amrani, Ramdf158562016-12-22 14:52:24 +02001707 n_sq_entries = attrs->cap.max_send_wr;
1708 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1709 n_sq_entries = max_t(u32, n_sq_entries, 1);
1710 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001711
Amrani, Ramdf158562016-12-22 14:52:24 +02001712 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1713
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001714 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1715 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1716 n_sq_elems, n_rq_elems);
1717 else
1718 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1719 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001720 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001721 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001722
1723 return rc;
1724}
1725
1726struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1727 struct ib_qp_init_attr *attrs,
1728 struct ib_udata *udata)
1729{
1730 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001731 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001732 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001733 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001734 int rc = 0;
1735
1736 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1737 udata ? "user library" : "kernel", pd);
1738
1739 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1740 if (rc)
1741 return ERR_PTR(rc);
1742
Wei Yongjun181d8012016-10-28 16:33:47 +00001743 if (attrs->srq)
1744 return ERR_PTR(-EINVAL);
1745
Ram Amranicecbcdd2016-10-10 13:15:34 +03001746 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001747 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1748 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001749 get_qedr_cq(attrs->send_cq),
1750 get_qedr_cq(attrs->send_cq)->icid,
1751 get_qedr_cq(attrs->recv_cq),
1752 get_qedr_cq(attrs->recv_cq)->icid);
1753
Amrani, Ramdf158562016-12-22 14:52:24 +02001754 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1755 if (!qp) {
1756 DP_ERR(dev, "create qp: failed allocating memory\n");
1757 return ERR_PTR(-ENOMEM);
1758 }
1759
1760 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001761
Ram Amrani04886772016-10-10 13:15:38 +03001762 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001763 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1764 if (IS_ERR(ibqp))
1765 kfree(qp);
1766 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001767 }
1768
Amrani, Ramdf158562016-12-22 14:52:24 +02001769 if (udata)
1770 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1771 else
1772 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001773
Amrani, Ramdf158562016-12-22 14:52:24 +02001774 if (rc)
1775 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001776
Ram Amranicecbcdd2016-10-10 13:15:34 +03001777 qp->ibqp.qp_num = qp->qp_id;
1778
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001779 rc = qedr_idr_add(dev, qp, qp->qp_id);
1780 if (rc)
1781 goto err;
1782
Ram Amranicecbcdd2016-10-10 13:15:34 +03001783 return &qp->ibqp;
1784
Amrani, Ramdf158562016-12-22 14:52:24 +02001785err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001786 kfree(qp);
1787
1788 return ERR_PTR(rc);
1789}
1790
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001791static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001792{
1793 switch (qp_state) {
1794 case QED_ROCE_QP_STATE_RESET:
1795 return IB_QPS_RESET;
1796 case QED_ROCE_QP_STATE_INIT:
1797 return IB_QPS_INIT;
1798 case QED_ROCE_QP_STATE_RTR:
1799 return IB_QPS_RTR;
1800 case QED_ROCE_QP_STATE_RTS:
1801 return IB_QPS_RTS;
1802 case QED_ROCE_QP_STATE_SQD:
1803 return IB_QPS_SQD;
1804 case QED_ROCE_QP_STATE_ERR:
1805 return IB_QPS_ERR;
1806 case QED_ROCE_QP_STATE_SQE:
1807 return IB_QPS_SQE;
1808 }
1809 return IB_QPS_ERR;
1810}
1811
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001812static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1813 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001814{
1815 switch (qp_state) {
1816 case IB_QPS_RESET:
1817 return QED_ROCE_QP_STATE_RESET;
1818 case IB_QPS_INIT:
1819 return QED_ROCE_QP_STATE_INIT;
1820 case IB_QPS_RTR:
1821 return QED_ROCE_QP_STATE_RTR;
1822 case IB_QPS_RTS:
1823 return QED_ROCE_QP_STATE_RTS;
1824 case IB_QPS_SQD:
1825 return QED_ROCE_QP_STATE_SQD;
1826 case IB_QPS_ERR:
1827 return QED_ROCE_QP_STATE_ERR;
1828 default:
1829 return QED_ROCE_QP_STATE_ERR;
1830 }
1831}
1832
1833static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1834{
1835 qed_chain_reset(&qph->pbl);
1836 qph->prod = 0;
1837 qph->cons = 0;
1838 qph->wqe_cons = 0;
1839 qph->db_data.data.value = cpu_to_le16(0);
1840}
1841
1842static int qedr_update_qp_state(struct qedr_dev *dev,
1843 struct qedr_qp *qp,
1844 enum qed_roce_qp_state new_state)
1845{
1846 int status = 0;
1847
1848 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001849 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001850
1851 switch (qp->state) {
1852 case QED_ROCE_QP_STATE_RESET:
1853 switch (new_state) {
1854 case QED_ROCE_QP_STATE_INIT:
1855 qp->prev_wqe_size = 0;
1856 qedr_reset_qp_hwq_info(&qp->sq);
1857 qedr_reset_qp_hwq_info(&qp->rq);
1858 break;
1859 default:
1860 status = -EINVAL;
1861 break;
1862 }
1863 break;
1864 case QED_ROCE_QP_STATE_INIT:
1865 switch (new_state) {
1866 case QED_ROCE_QP_STATE_RTR:
1867 /* Update doorbell (in case post_recv was
1868 * done before move to RTR)
1869 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001870
1871 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1872 wmb();
1873 writel(qp->rq.db_data.raw, qp->rq.db);
1874 /* Make sure write takes effect */
1875 mmiowb();
1876 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001877 break;
1878 case QED_ROCE_QP_STATE_ERR:
1879 break;
1880 default:
1881 /* Invalid state change. */
1882 status = -EINVAL;
1883 break;
1884 }
1885 break;
1886 case QED_ROCE_QP_STATE_RTR:
1887 /* RTR->XXX */
1888 switch (new_state) {
1889 case QED_ROCE_QP_STATE_RTS:
1890 break;
1891 case QED_ROCE_QP_STATE_ERR:
1892 break;
1893 default:
1894 /* Invalid state change. */
1895 status = -EINVAL;
1896 break;
1897 }
1898 break;
1899 case QED_ROCE_QP_STATE_RTS:
1900 /* RTS->XXX */
1901 switch (new_state) {
1902 case QED_ROCE_QP_STATE_SQD:
1903 break;
1904 case QED_ROCE_QP_STATE_ERR:
1905 break;
1906 default:
1907 /* Invalid state change. */
1908 status = -EINVAL;
1909 break;
1910 }
1911 break;
1912 case QED_ROCE_QP_STATE_SQD:
1913 /* SQD->XXX */
1914 switch (new_state) {
1915 case QED_ROCE_QP_STATE_RTS:
1916 case QED_ROCE_QP_STATE_ERR:
1917 break;
1918 default:
1919 /* Invalid state change. */
1920 status = -EINVAL;
1921 break;
1922 }
1923 break;
1924 case QED_ROCE_QP_STATE_ERR:
1925 /* ERR->XXX */
1926 switch (new_state) {
1927 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001928 if ((qp->rq.prod != qp->rq.cons) ||
1929 (qp->sq.prod != qp->sq.cons)) {
1930 DP_NOTICE(dev,
1931 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1932 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1933 qp->sq.cons);
1934 status = -EINVAL;
1935 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001936 break;
1937 default:
1938 status = -EINVAL;
1939 break;
1940 }
1941 break;
1942 default:
1943 status = -EINVAL;
1944 break;
1945 }
1946
1947 return status;
1948}
1949
1950int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1951 int attr_mask, struct ib_udata *udata)
1952{
1953 struct qedr_qp *qp = get_qedr_qp(ibqp);
1954 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1955 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001956 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001957 enum ib_qp_state old_qp_state, new_qp_state;
1958 int rc = 0;
1959
1960 DP_DEBUG(dev, QEDR_MSG_QP,
1961 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1962 attr->qp_state);
1963
1964 old_qp_state = qedr_get_ibqp_state(qp->state);
1965 if (attr_mask & IB_QP_STATE)
1966 new_qp_state = attr->qp_state;
1967 else
1968 new_qp_state = old_qp_state;
1969
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001970 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1971 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1972 ibqp->qp_type, attr_mask,
1973 IB_LINK_LAYER_ETHERNET)) {
1974 DP_ERR(dev,
1975 "modify qp: invalid attribute mask=0x%x specified for\n"
1976 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1977 attr_mask, qp->qp_id, ibqp->qp_type,
1978 old_qp_state, new_qp_state);
1979 rc = -EINVAL;
1980 goto err;
1981 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001982 }
1983
1984 /* Translate the masks... */
1985 if (attr_mask & IB_QP_STATE) {
1986 SET_FIELD(qp_params.modify_flags,
1987 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1988 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1989 }
1990
1991 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1992 qp_params.sqd_async = true;
1993
1994 if (attr_mask & IB_QP_PKEY_INDEX) {
1995 SET_FIELD(qp_params.modify_flags,
1996 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1997 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1998 rc = -EINVAL;
1999 goto err;
2000 }
2001
2002 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2003 }
2004
2005 if (attr_mask & IB_QP_QKEY)
2006 qp->qkey = attr->qkey;
2007
2008 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2009 SET_FIELD(qp_params.modify_flags,
2010 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2011 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2012 IB_ACCESS_REMOTE_READ;
2013 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2014 IB_ACCESS_REMOTE_WRITE;
2015 qp_params.incoming_atomic_en = attr->qp_access_flags &
2016 IB_ACCESS_REMOTE_ATOMIC;
2017 }
2018
2019 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2020 if (attr_mask & IB_QP_PATH_MTU) {
2021 if (attr->path_mtu < IB_MTU_256 ||
2022 attr->path_mtu > IB_MTU_4096) {
2023 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2024 rc = -EINVAL;
2025 goto err;
2026 }
2027 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2028 ib_mtu_enum_to_int(iboe_get_mtu
2029 (dev->ndev->mtu)));
2030 }
2031
2032 if (!qp->mtu) {
2033 qp->mtu =
2034 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2035 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2036 }
2037
2038 SET_FIELD(qp_params.modify_flags,
2039 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2040
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002041 qp_params.traffic_class_tos = grh->traffic_class;
2042 qp_params.flow_label = grh->flow_label;
2043 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002044
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002045 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002046
2047 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2048 if (rc) {
2049 DP_ERR(dev,
2050 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002051 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002052 return rc;
2053 }
2054
2055 rc = qedr_get_dmac(dev, &attr->ah_attr,
2056 qp_params.remote_mac_addr);
2057 if (rc)
2058 return rc;
2059
2060 qp_params.use_local_mac = true;
2061 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2062
2063 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2064 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2065 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2066 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2067 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2068 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2069 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2070 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002071
2072 qp_params.mtu = qp->mtu;
2073 qp_params.lb_indication = false;
2074 }
2075
2076 if (!qp_params.mtu) {
2077 /* Stay with current MTU */
2078 if (qp->mtu)
2079 qp_params.mtu = qp->mtu;
2080 else
2081 qp_params.mtu =
2082 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2083 }
2084
2085 if (attr_mask & IB_QP_TIMEOUT) {
2086 SET_FIELD(qp_params.modify_flags,
2087 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2088
Kalderon, Michalc3594f22018-03-21 14:51:50 +02002089 /* The received timeout value is an exponent used like this:
2090 * "12.7.34 LOCAL ACK TIMEOUT
2091 * Value representing the transport (ACK) timeout for use by
2092 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2093 * The FW expects timeout in msec so we need to divide the usec
2094 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2095 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2096 * The value of zero means infinite so we use a 'max_t' to make
2097 * sure that sub 1 msec values will be configured as 1 msec.
2098 */
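/* Worked example (illustrative value): attr->timeout == 14 means
 * 4.096 usec * 2^14, roughly 67 msec, and the approximation below
 * programs 2^(14 - 8) = 64 msec; any non-zero timeout of 8 or less
 * is clamped to 1 msec by the max_t().
 */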
2099 if (attr->timeout)
2100 qp_params.ack_timeout =
2101 1 << max_t(int, attr->timeout - 8, 0);
2102 else
Ram Amranicecbcdd2016-10-10 13:15:34 +03002103 qp_params.ack_timeout = 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002104 }
Kalderon, Michalc3594f22018-03-21 14:51:50 +02002105
Ram Amranicecbcdd2016-10-10 13:15:34 +03002106 if (attr_mask & IB_QP_RETRY_CNT) {
2107 SET_FIELD(qp_params.modify_flags,
2108 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2109 qp_params.retry_cnt = attr->retry_cnt;
2110 }
2111
2112 if (attr_mask & IB_QP_RNR_RETRY) {
2113 SET_FIELD(qp_params.modify_flags,
2114 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2115 qp_params.rnr_retry_cnt = attr->rnr_retry;
2116 }
2117
2118 if (attr_mask & IB_QP_RQ_PSN) {
2119 SET_FIELD(qp_params.modify_flags,
2120 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2121 qp_params.rq_psn = attr->rq_psn;
2122 qp->rq_psn = attr->rq_psn;
2123 }
2124
2125 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2126 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2127 rc = -EINVAL;
2128 DP_ERR(dev,
2129 "unsupported max_rd_atomic=%d, supported=%d\n",
2130 attr->max_rd_atomic,
2131 dev->attr.max_qp_req_rd_atomic_resc);
2132 goto err;
2133 }
2134
2135 SET_FIELD(qp_params.modify_flags,
2136 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2137 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2138 }
2139
2140 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2141 SET_FIELD(qp_params.modify_flags,
2142 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2143 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2144 }
2145
2146 if (attr_mask & IB_QP_SQ_PSN) {
2147 SET_FIELD(qp_params.modify_flags,
2148 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2149 qp_params.sq_psn = attr->sq_psn;
2150 qp->sq_psn = attr->sq_psn;
2151 }
2152
2153 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2154 if (attr->max_dest_rd_atomic >
2155 dev->attr.max_qp_resp_rd_atomic_resc) {
2156 DP_ERR(dev,
2157 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2158 attr->max_dest_rd_atomic,
2159 dev->attr.max_qp_resp_rd_atomic_resc);
2160
2161 rc = -EINVAL;
2162 goto err;
2163 }
2164
2165 SET_FIELD(qp_params.modify_flags,
2166 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2167 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2168 }
2169
2170 if (attr_mask & IB_QP_DEST_QPN) {
2171 SET_FIELD(qp_params.modify_flags,
2172 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2173
2174 qp_params.dest_qp = attr->dest_qp_num;
2175 qp->dest_qp_num = attr->dest_qp_num;
2176 }
2177
2178 if (qp->qp_type != IB_QPT_GSI)
2179 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2180 qp->qed_qp, &qp_params);
2181
2182 if (attr_mask & IB_QP_STATE) {
2183 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002184 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002185 qp->state = qp_params.new_state;
2186 }
2187
2188err:
2189 return rc;
2190}
2191
2192static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2193{
2194 int ib_qp_acc_flags = 0;
2195
2196 if (params->incoming_rdma_write_en)
2197 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2198 if (params->incoming_rdma_read_en)
2199 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2200 if (params->incoming_atomic_en)
2201 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2202 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2203 return ib_qp_acc_flags;
2204}
2205
2206int qedr_query_qp(struct ib_qp *ibqp,
2207 struct ib_qp_attr *qp_attr,
2208 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2209{
2210 struct qed_rdma_query_qp_out_params params;
2211 struct qedr_qp *qp = get_qedr_qp(ibqp);
2212 struct qedr_dev *dev = qp->dev;
2213 int rc = 0;
2214
2215 memset(&params, 0, sizeof(params));
2216
2217 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2218 if (rc)
2219 goto err;
2220
2221 memset(qp_attr, 0, sizeof(*qp_attr));
2222 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2223
2224 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2225 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002226 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002227 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2228 qp_attr->rq_psn = params.rq_psn;
2229 qp_attr->sq_psn = params.sq_psn;
2230 qp_attr->dest_qp_num = params.dest_qp;
2231
2232 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2233
2234 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2235 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2236 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2237 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002238 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002239 qp_init_attr->cap = qp_attr->cap;
2240
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002241 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002242 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2243 params.flow_label, qp->sgid_idx,
2244 params.hop_limit_ttl, params.traffic_class_tos);
2245 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2246 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2247 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002248 qp_attr->timeout = params.timeout;
2249 qp_attr->rnr_retry = params.rnr_retry;
2250 qp_attr->retry_cnt = params.retry_cnt;
2251 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2252 qp_attr->pkey_index = params.pkey_index;
2253 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002254 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2255 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002256 qp_attr->alt_pkey_index = 0;
2257 qp_attr->alt_port_num = 0;
2258 qp_attr->alt_timeout = 0;
2259 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2260
2261 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2262 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2263 qp_attr->max_rd_atomic = params.max_rd_atomic;
2264 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2265
2266 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2267 qp_attr->cap.max_inline_data);
2268
2269err:
2270 return rc;
2271}
2272
Bart Van Assche00899852017-10-11 10:49:17 -07002273static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
Amrani, Ramdf158562016-12-22 14:52:24 +02002274{
2275 int rc = 0;
2276
2277 if (qp->qp_type != IB_QPT_GSI) {
2278 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2279 if (rc)
2280 return rc;
2281 }
2282
2283 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2284 qedr_cleanup_user(dev, qp);
2285 else
2286 qedr_cleanup_kernel(dev, qp);
2287
2288 return 0;
2289}
2290
Ram Amranicecbcdd2016-10-10 13:15:34 +03002291int qedr_destroy_qp(struct ib_qp *ibqp)
2292{
2293 struct qedr_qp *qp = get_qedr_qp(ibqp);
2294 struct qedr_dev *dev = qp->dev;
2295 struct ib_qp_attr attr;
2296 int attr_mask = 0;
2297 int rc = 0;
2298
2299 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2300 qp, qp->qp_type);
2301
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002302 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2303 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2304 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2305 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002306
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002307 attr.qp_state = IB_QPS_ERR;
2308 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002309
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002310 /* Change the QP state to ERROR */
2311 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2312 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002313 } else {
2314 /* Wait for the connect/accept to complete */
2315 if (qp->ep) {
2316 int wait_count = 1;
2317
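/* Poll every 100 msec; the wait_count check below bounds the wait
 * to roughly 200 * 100 msec = 20 seconds before giving up.
 */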
2318 while (qp->ep->during_connect) {
2319 DP_DEBUG(dev, QEDR_MSG_QP,
2320 "Still in during connect/accept\n");
2321
2322 msleep(100);
2323 if (wait_count++ > 200) {
2324 DP_NOTICE(dev,
2325 "during connect timeout\n");
2326 break;
2327 }
2328 }
2329 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002330 }
2331
Amrani, Ramdf158562016-12-22 14:52:24 +02002332 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002333 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002334
Amrani, Ramdf158562016-12-22 14:52:24 +02002335 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002336
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002337 if (atomic_dec_and_test(&qp->refcnt)) {
2338 qedr_idr_remove(dev, qp->qp_id);
2339 kfree(qp);
2340 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002341 return rc;
2342}
Ram Amranie0290cc2016-10-10 13:15:35 +03002343
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002344struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002345 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002346{
2347 struct qedr_ah *ah;
2348
2349 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2350 if (!ah)
2351 return ERR_PTR(-ENOMEM);
2352
2353 ah->attr = *attr;
2354
2355 return &ah->ibah;
2356}
2357
2358int qedr_destroy_ah(struct ib_ah *ibah)
2359{
2360 struct qedr_ah *ah = get_qedr_ah(ibah);
2361
2362 kfree(ah);
2363 return 0;
2364}
2365
Ram Amranie0290cc2016-10-10 13:15:35 +03002366static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2367{
2368 struct qedr_pbl *pbl, *tmp;
2369
2370 if (info->pbl_table)
2371 list_add_tail(&info->pbl_table->list_entry,
2372 &info->free_pbl_list);
2373
2374 if (!list_empty(&info->inuse_pbl_list))
2375 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2376
2377 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2378 list_del(&pbl->list_entry);
2379 qedr_free_pbl(dev, &info->pbl_info, pbl);
2380 }
2381}
2382
2383static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2384 size_t page_list_len, bool two_layered)
2385{
2386 struct qedr_pbl *tmp;
2387 int rc;
2388
2389 INIT_LIST_HEAD(&info->free_pbl_list);
2390 INIT_LIST_HEAD(&info->inuse_pbl_list);
2391
2392 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2393 page_list_len, two_layered);
2394 if (rc)
2395 goto done;
2396
2397 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002398 if (IS_ERR(info->pbl_table)) {
2399 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002400 goto done;
2401 }
2402
2403 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2404 &info->pbl_table->pa);
2405
2406 /* In the usual case we use 2 PBLs, so we add one to the free
2407 * list and allocate another one
2408 */
2409 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002410 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002411 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2412 goto done;
2413 }
2414
2415 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2416
2417 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2418
2419done:
2420 if (rc)
2421 free_mr_info(dev, info);
2422
2423 return rc;
2424}
2425
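/* User MR registration, as implemented below: pin the user buffer with
 * ib_umem_get(), build and populate the PBLs describing its pages,
 * allocate a TID from qed and register it with the HW attributes;
 * the returned lkey/rkey are formed as (itid << 8) | key.
 */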
2426struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2427 u64 usr_addr, int acc, struct ib_udata *udata)
2428{
2429 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2430 struct qedr_mr *mr;
2431 struct qedr_pd *pd;
2432 int rc = -ENOMEM;
2433
2434 pd = get_qedr_pd(ibpd);
2435 DP_DEBUG(dev, QEDR_MSG_MR,
2436 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2437 pd->pd_id, start, len, usr_addr, acc);
2438
2439 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2440 return ERR_PTR(-EINVAL);
2441
2442 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2443 if (!mr)
2444 return ERR_PTR(rc);
2445
2446 mr->type = QEDR_MR_USER;
2447
2448 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2449 if (IS_ERR(mr->umem)) {
2450 rc = -EFAULT;
2451 goto err0;
2452 }
2453
2454 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2455 if (rc)
2456 goto err1;
2457
2458 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002459 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002460
2461 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2462 if (rc) {
2463 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2464 goto err1;
2465 }
2466
2467 /* Index only, 18 bit long, lkey = itid << 8 | key */
2468 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2469 mr->hw_mr.key = 0;
2470 mr->hw_mr.pd = pd->pd_id;
2471 mr->hw_mr.local_read = 1;
2472 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2473 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2474 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2475 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2476 mr->hw_mr.mw_bind = false;
2477 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2478 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2479 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002480 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002481 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2482 mr->hw_mr.length = len;
2483 mr->hw_mr.vaddr = usr_addr;
2484 mr->hw_mr.zbva = false;
2485 mr->hw_mr.phy_mr = false;
2486 mr->hw_mr.dma_mr = false;
2487
2488 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2489 if (rc) {
2490 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2491 goto err2;
2492 }
2493
2494 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2495 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2496 mr->hw_mr.remote_atomic)
2497 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2498
2499 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2500 mr->ibmr.lkey);
2501 return &mr->ibmr;
2502
2503err2:
2504 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2505err1:
2506 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2507err0:
2508 kfree(mr);
2509 return ERR_PTR(rc);
2510}
2511
2512int qedr_dereg_mr(struct ib_mr *ib_mr)
2513{
2514 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2515 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2516 int rc = 0;
2517
2518 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2519 if (rc)
2520 return rc;
2521
2522 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2523
2524 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2525 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2526
2527 /* it could be user registered memory. */
2528 if (mr->umem)
2529 ib_umem_release(mr->umem);
2530
2531 kfree(mr);
2532
2533 return rc;
2534}
2535
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002536static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2537 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002538{
2539 struct qedr_pd *pd = get_qedr_pd(ibpd);
2540 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2541 struct qedr_mr *mr;
2542 int rc = -ENOMEM;
2543
2544 DP_DEBUG(dev, QEDR_MSG_MR,
2545 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2546 max_page_list_len);
2547
2548 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2549 if (!mr)
2550 return ERR_PTR(rc);
2551
2552 mr->dev = dev;
2553 mr->type = QEDR_MR_FRMR;
2554
2555 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2556 if (rc)
2557 goto err0;
2558
2559 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2560 if (rc) {
2561 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2562 goto err0;
2563 }
2564
2565 /* Index only, 18 bit long, lkey = itid << 8 | key */
2566 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2567 mr->hw_mr.key = 0;
2568 mr->hw_mr.pd = pd->pd_id;
2569 mr->hw_mr.local_read = 1;
2570 mr->hw_mr.local_write = 0;
2571 mr->hw_mr.remote_read = 0;
2572 mr->hw_mr.remote_write = 0;
2573 mr->hw_mr.remote_atomic = 0;
2574 mr->hw_mr.mw_bind = false;
2575 mr->hw_mr.pbl_ptr = 0;
2576 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2577 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2578 mr->hw_mr.fbo = 0;
2579 mr->hw_mr.length = 0;
2580 mr->hw_mr.vaddr = 0;
2581 mr->hw_mr.zbva = false;
2582 mr->hw_mr.phy_mr = true;
2583 mr->hw_mr.dma_mr = false;
2584
2585 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2586 if (rc) {
2587 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2588 goto err1;
2589 }
2590
2591 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2592 mr->ibmr.rkey = mr->ibmr.lkey;
2593
2594 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2595 return mr;
2596
2597err1:
2598 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2599err0:
2600 kfree(mr);
2601 return ERR_PTR(rc);
2602}
2603
2604struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2605 enum ib_mr_type mr_type, u32 max_num_sg)
2606{
Ram Amranie0290cc2016-10-10 13:15:35 +03002607 struct qedr_mr *mr;
2608
2609 if (mr_type != IB_MR_TYPE_MEM_REG)
2610 return ERR_PTR(-EINVAL);
2611
2612 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2613
2614 if (IS_ERR(mr))
2615 return ERR_PTR(-EINVAL);
2616
Ram Amranie0290cc2016-10-10 13:15:35 +03002617 return &mr->ibmr;
2618}
2619
2620static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2621{
2622 struct qedr_mr *mr = get_qedr_mr(ibmr);
2623 struct qedr_pbl *pbl_table;
2624 struct regpair *pbe;
2625 u32 pbes_in_page;
2626
2627 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2628 DP_ERR(mr->dev, "qedr_set_page failed, PBL is full (npages=%d)\n", mr->npages);
2629 return -ENOMEM;
2630 }
2631
2632 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2633 mr->npages, addr);
2634
2635 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
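/* Example (assuming a 4 KiB PBL page): pbes_in_page == 512, so
 * npages == 700 selects pbl_table[1], PBE index 188 within that page.
 */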
2636 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2637 pbe = (struct regpair *)pbl_table->va;
2638 pbe += mr->npages % pbes_in_page;
2639 pbe->lo = cpu_to_le32((u32)addr);
2640 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2641
2642 mr->npages++;
2643
2644 return 0;
2645}
2646
2647static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2648{
2649 int work = info->completed - info->completed_handled - 1;
2650
2651 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2652 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2653 struct qedr_pbl *pbl;
2654
2655 /* Free all the page lists that can be freed
2656 * (all the ones that were invalidated), under the assumption
2657 * that if an FMR completed successfully, any invalidate
2658 * operation posted before it has also completed
2659 */
2660 pbl = list_first_entry(&info->inuse_pbl_list,
2661 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002662 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002663 info->completed_handled++;
2664 }
2665}
2666
2667int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2668 int sg_nents, unsigned int *sg_offset)
2669{
2670 struct qedr_mr *mr = get_qedr_mr(ibmr);
2671
2672 mr->npages = 0;
2673
2674 handle_completed_mrs(mr->dev, &mr->info);
2675 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2676}
2677
2678struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2679{
2680 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2681 struct qedr_pd *pd = get_qedr_pd(ibpd);
2682 struct qedr_mr *mr;
2683 int rc;
2684
2685 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2686 if (!mr)
2687 return ERR_PTR(-ENOMEM);
2688
2689 mr->type = QEDR_MR_DMA;
2690
2691 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2692 if (rc) {
2693 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2694 goto err1;
2695 }
2696
2697 /* index only, 18 bit long, lkey = itid << 8 | key */
2698 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2699 mr->hw_mr.pd = pd->pd_id;
2700 mr->hw_mr.local_read = 1;
2701 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2702 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2703 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2704 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2705 mr->hw_mr.dma_mr = true;
2706
2707 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2708 if (rc) {
2709 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2710 goto err2;
2711 }
2712
2713 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2714 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2715 mr->hw_mr.remote_atomic)
2716 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2717
2718 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2719 return &mr->ibmr;
2720
2721err2:
2722 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2723err1:
2724 kfree(mr);
2725 return ERR_PTR(rc);
2726}
Ram Amraniafa0e132016-10-10 13:15:36 +03002727
2728static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2729{
2730 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2731}
2732
2733static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2734{
2735 int i, len = 0;
2736
2737 for (i = 0; i < num_sge; i++)
2738 len += sg_list[i].length;
2739
2740 return len;
2741}
2742
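/* Note: cpu_to_be64(cpu_to_le64(x)) byte-swaps x on both little- and
 * big-endian hosts (exactly one of the two conversions is a swap), so
 * each fully written 64-bit word of the inline segment below is
 * swapped unconditionally.
 */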
2743static void swap_wqe_data64(u64 *p)
2744{
2745 int i;
2746
2747 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2748 *p = cpu_to_be64(cpu_to_le64(*p));
2749}
2750
2751static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2752 struct qedr_qp *qp, u8 *wqe_size,
2753 struct ib_send_wr *wr,
2754 struct ib_send_wr **bad_wr, u8 *bits,
2755 u8 bit)
2756{
2757 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2758 char *seg_prt, *wqe;
2759 int i, seg_siz;
2760
2761 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2762 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2763 *bad_wr = wr;
2764 return 0;
2765 }
2766
2767 if (!data_size)
2768 return data_size;
2769
2770 *bits |= bit;
2771
2772 seg_prt = NULL;
2773 wqe = NULL;
2774 seg_siz = 0;
2775
2776 /* Copy data inline */
2777 for (i = 0; i < wr->num_sge; i++) {
2778 u32 len = wr->sg_list[i].length;
2779 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2780
2781 while (len > 0) {
2782 u32 cur;
2783
2784 /* New segment required */
2785 if (!seg_siz) {
2786 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2787 seg_prt = wqe;
2788 seg_siz = sizeof(struct rdma_sq_common_wqe);
2789 (*wqe_size)++;
2790 }
2791
2792 /* Calculate currently allowed length */
2793 cur = min_t(u32, len, seg_siz);
2794 memcpy(seg_prt, src, cur);
2795
2796 /* Update segment variables */
2797 seg_prt += cur;
2798 seg_siz -= cur;
2799
2800 /* Update sge variables */
2801 src += cur;
2802 len -= cur;
2803
2804 /* Swap fully-completed segments */
2805 if (!seg_siz)
2806 swap_wqe_data64((u64 *)wqe);
2807 }
2808 }
2809
2810 /* swap last not completed segment */
2811 if (seg_siz)
2812 swap_wqe_data64((u64 *)wqe);
2813
2814 return data_size;
2815}
2816
2817#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2818 do { \
2819 DMA_REGPAIR_LE(sge->addr, vaddr); \
2820 (sge)->length = cpu_to_le32(vlength); \
2821 (sge)->flags = cpu_to_le32(vflags); \
2822 } while (0)
2823
2824#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2825 do { \
2826 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2827 (hdr)->num_sges = num_sge; \
2828 } while (0)
2829
2830#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2831 do { \
2832 DMA_REGPAIR_LE(sge->addr, vaddr); \
2833 (sge)->length = cpu_to_le32(vlength); \
2834 (sge)->l_key = cpu_to_le32(vlkey); \
2835 } while (0)
2836
2837static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2838 struct ib_send_wr *wr)
2839{
2840 u32 data_size = 0;
2841 int i;
2842
2843 for (i = 0; i < wr->num_sge; i++) {
2844 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2845
2846 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2847 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2848 sge->length = cpu_to_le32(wr->sg_list[i].length);
2849 data_size += wr->sg_list[i].length;
2850 }
2851
2852 if (wqe_size)
2853 *wqe_size += wr->num_sge;
2854
2855 return data_size;
2856}
2857
2858static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2859 struct qedr_qp *qp,
2860 struct rdma_sq_rdma_wqe_1st *rwqe,
2861 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2862 struct ib_send_wr *wr,
2863 struct ib_send_wr **bad_wr)
2864{
2865 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2866 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2867
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002868 if (wr->send_flags & IB_SEND_INLINE &&
2869 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2870 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002871 u8 flags = 0;
2872
2873 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2874 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2875 bad_wr, &rwqe->flags, flags);
2876 }
2877
2878 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2879}
2880
2881static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2882 struct qedr_qp *qp,
2883 struct rdma_sq_send_wqe_1st *swqe,
2884 struct rdma_sq_send_wqe_2st *swqe2,
2885 struct ib_send_wr *wr,
2886 struct ib_send_wr **bad_wr)
2887{
2888 memset(swqe2, 0, sizeof(*swqe2));
2889 if (wr->send_flags & IB_SEND_INLINE) {
2890 u8 flags = 0;
2891
2892 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2893 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2894 bad_wr, &swqe->flags, flags);
2895 }
2896
2897 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2898}
2899
2900static int qedr_prepare_reg(struct qedr_qp *qp,
2901 struct rdma_sq_fmr_wqe_1st *fwqe1,
2902 struct ib_reg_wr *wr)
2903{
2904 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2905 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2906
2907 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2908 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2909 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2910 fwqe1->l_key = wr->key;
2911
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002912 fwqe2->access_ctrl = 0;
2913
Ram Amraniafa0e132016-10-10 13:15:36 +03002914 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2915 !!(wr->access & IB_ACCESS_REMOTE_READ));
2916 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2917 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2918 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2919 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2920 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2921 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2922 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2923 fwqe2->fmr_ctrl = 0;
2924
2925 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2926 ilog2(mr->ibmr.page_size) - 12);
2927
2928 fwqe2->length_hi = 0;
2929 fwqe2->length_lo = mr->ibmr.length;
2930 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2931 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2932
2933 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2934
2935 return 0;
2936}
2937
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002938static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002939{
2940 switch (opcode) {
2941 case IB_WR_RDMA_WRITE:
2942 case IB_WR_RDMA_WRITE_WITH_IMM:
2943 return IB_WC_RDMA_WRITE;
2944 case IB_WR_SEND_WITH_IMM:
2945 case IB_WR_SEND:
2946 case IB_WR_SEND_WITH_INV:
2947 return IB_WC_SEND;
2948 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002949 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002950 return IB_WC_RDMA_READ;
2951 case IB_WR_ATOMIC_CMP_AND_SWP:
2952 return IB_WC_COMP_SWAP;
2953 case IB_WR_ATOMIC_FETCH_AND_ADD:
2954 return IB_WC_FETCH_ADD;
2955 case IB_WR_REG_MR:
2956 return IB_WC_REG_MR;
2957 case IB_WR_LOCAL_INV:
2958 return IB_WC_LOCAL_INV;
2959 default:
2960 return IB_WC_SEND;
2961 }
2962}
2963
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002964static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002965{
2966 int wq_is_full, err_wr, pbl_is_full;
2967 struct qedr_dev *dev = qp->dev;
2968
2969 /* prevent SQ overflow and/or processing of a bad WR */
2970 err_wr = wr->num_sge > qp->sq.max_sges;
2971 wq_is_full = qedr_wq_is_full(&qp->sq);
2972 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2973 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2974 if (wq_is_full || err_wr || pbl_is_full) {
2975 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2976 DP_ERR(dev,
2977 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2978 qp);
2979 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2980 }
2981
2982 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2983 DP_ERR(dev,
2984 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2985 qp);
2986 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2987 }
2988
2989 if (pbl_is_full &&
2990 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2991 DP_ERR(dev,
2992 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2993 qp);
2994 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2995 }
2996 return false;
2997 }
2998 return true;
2999}
3000
Ram Amrani27a4b1a2017-01-24 13:51:39 +02003001static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03003002 struct ib_send_wr **bad_wr)
3003{
3004 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3005 struct qedr_qp *qp = get_qedr_qp(ibqp);
3006 struct rdma_sq_atomic_wqe_1st *awqe1;
3007 struct rdma_sq_atomic_wqe_2nd *awqe2;
3008 struct rdma_sq_atomic_wqe_3rd *awqe3;
3009 struct rdma_sq_send_wqe_2st *swqe2;
3010 struct rdma_sq_local_inv_wqe *iwqe;
3011 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3012 struct rdma_sq_send_wqe_1st *swqe;
3013 struct rdma_sq_rdma_wqe_1st *rwqe;
3014 struct rdma_sq_fmr_wqe_1st *fwqe1;
3015 struct rdma_sq_common_wqe *wqe;
3016 u32 length;
3017 int rc = 0;
3018 bool comp;
3019
3020 if (!qedr_can_post_send(qp, wr)) {
3021 *bad_wr = wr;
3022 return -ENOMEM;
3023 }
3024
3025 wqe = qed_chain_produce(&qp->sq.pbl);
3026 qp->wqe_wr_id[qp->sq.prod].signaled =
3027 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3028
3029 wqe->flags = 0;
3030 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3031 !!(wr->send_flags & IB_SEND_SOLICITED));
3032 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3033 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3034 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3035 !!(wr->send_flags & IB_SEND_FENCE));
3036 wqe->prev_wqe_size = qp->prev_wqe_size;
3037
3038 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3039
3040 switch (wr->opcode) {
3041 case IB_WR_SEND_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02003042 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3043 rc = -EINVAL;
3044 *bad_wr = wr;
3045 break;
3046 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003047 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3048 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3049 swqe->wqe_size = 2;
3050 swqe2 = qed_chain_produce(&qp->sq.pbl);
3051
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07003052 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
Ram Amraniafa0e132016-10-10 13:15:36 +03003053 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3054 wr, bad_wr);
3055 swqe->length = cpu_to_le32(length);
3056 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3057 qp->prev_wqe_size = swqe->wqe_size;
3058 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3059 break;
3060 case IB_WR_SEND:
3061 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3062 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3063
3064 swqe->wqe_size = 2;
3065 swqe2 = qed_chain_produce(&qp->sq.pbl);
3066 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3067 wr, bad_wr);
3068 swqe->length = cpu_to_le32(length);
3069 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3070 qp->prev_wqe_size = swqe->wqe_size;
3071 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3072 break;
3073 case IB_WR_SEND_WITH_INV:
3074 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3075 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3076 swqe2 = qed_chain_produce(&qp->sq.pbl);
3077 swqe->wqe_size = 2;
3078 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3079 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3080 wr, bad_wr);
3081 swqe->length = cpu_to_le32(length);
3082 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3083 qp->prev_wqe_size = swqe->wqe_size;
3084 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3085 break;
3086
3087 case IB_WR_RDMA_WRITE_WITH_IMM:
Kalderon, Michal551e1c62018-03-05 10:50:11 +02003088 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3089 rc = -EINVAL;
3090 *bad_wr = wr;
3091 break;
3092 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003093 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3094 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3095
3096 rwqe->wqe_size = 2;
3097 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3098 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3099 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3100 wr, bad_wr);
3101 rwqe->length = cpu_to_le32(length);
3102 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3103 qp->prev_wqe_size = rwqe->wqe_size;
3104 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3105 break;
3106 case IB_WR_RDMA_WRITE:
3107 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3108 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3109
3110 rwqe->wqe_size = 2;
3111 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3112 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3113 wr, bad_wr);
3114 rwqe->length = cpu_to_le32(length);
3115 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3116 qp->prev_wqe_size = rwqe->wqe_size;
3117 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3118 break;
3119 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003120 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
Bart Van Assche1b8a708b2017-10-11 10:49:19 -07003121 /* fallthrough -- handling is identical to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003122
3123 case IB_WR_RDMA_READ:
3124 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3125 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3126
3127 rwqe->wqe_size = 2;
3128 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3129 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3130 wr, bad_wr);
3131 rwqe->length = cpu_to_le32(length);
3132 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3133 qp->prev_wqe_size = rwqe->wqe_size;
3134 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3135 break;
3136
3137 case IB_WR_ATOMIC_CMP_AND_SWP:
3138 case IB_WR_ATOMIC_FETCH_AND_ADD:
3139 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3140 awqe1->wqe_size = 4;
3141
3142 awqe2 = qed_chain_produce(&qp->sq.pbl);
3143 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3144 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3145
3146 awqe3 = qed_chain_produce(&qp->sq.pbl);
3147
3148 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3149 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3150 DMA_REGPAIR_LE(awqe3->swap_data,
3151 atomic_wr(wr)->compare_add);
3152 } else {
3153 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3154 DMA_REGPAIR_LE(awqe3->swap_data,
3155 atomic_wr(wr)->swap);
3156 DMA_REGPAIR_LE(awqe3->cmp_data,
3157 atomic_wr(wr)->compare_add);
3158 }
3159
3160 qedr_prepare_sq_sges(qp, NULL, wr);
3161
3162 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3163 qp->prev_wqe_size = awqe1->wqe_size;
3164 break;
3165
3166 case IB_WR_LOCAL_INV:
3167 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3168 iwqe->wqe_size = 1;
3169
3170 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3171 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3172 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3173 qp->prev_wqe_size = iwqe->wqe_size;
3174 break;
3175 case IB_WR_REG_MR:
3176 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3177 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3178 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3179 fwqe1->wqe_size = 2;
3180
3181 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3182 if (rc) {
3183 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3184 *bad_wr = wr;
3185 break;
3186 }
3187
3188 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3189 qp->prev_wqe_size = fwqe1->wqe_size;
3190 break;
3191 default:
3192 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3193 rc = -EINVAL;
3194 *bad_wr = wr;
3195 break;
3196 }
3197
3198 if (*bad_wr) {
3199 u16 value;
3200
3201 /* Restore prod to its position before
3202 * this WR was processed
3203 */
3204 value = le16_to_cpu(qp->sq.db_data.data.value);
3205 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3206
3207 /* Restore prev_wqe_size */
3208 qp->prev_wqe_size = wqe->prev_wqe_size;
3209 rc = -EINVAL;
3210 DP_ERR(dev, "POST SEND FAILED\n");
3211 }
3212
3213 return rc;
3214}
3215
3216int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3217 struct ib_send_wr **bad_wr)
3218{
3219 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3220 struct qedr_qp *qp = get_qedr_qp(ibqp);
3221 unsigned long flags;
3222 int rc = 0;
3223
3224 *bad_wr = NULL;
3225
Ram Amrani04886772016-10-10 13:15:38 +03003226 if (qp->qp_type == IB_QPT_GSI)
3227 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3228
Ram Amraniafa0e132016-10-10 13:15:36 +03003229 spin_lock_irqsave(&qp->q_lock, flags);
3230
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003231 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3232 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3233 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3234 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3235 spin_unlock_irqrestore(&qp->q_lock, flags);
3236 *bad_wr = wr;
3237 DP_DEBUG(dev, QEDR_MSG_CQ,
3238 "QP in wrong state! QP icid=0x%x state %d\n",
3239 qp->icid, qp->state);
3240 return -EINVAL;
3241 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003242 }
3243
Ram Amraniafa0e132016-10-10 13:15:36 +03003244 while (wr) {
3245 rc = __qedr_post_send(ibqp, wr, bad_wr);
3246 if (rc)
3247 break;
3248
3249 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3250
3251 qedr_inc_sw_prod(&qp->sq);
3252
3253 qp->sq.db_data.data.value++;
3254
3255 wr = wr->next;
3256 }
3257
3258 /* Trigger doorbell
3259 * If there was a failure in the first WR then it will be triggered in
3260 * vain. However this is not harmful (as long as the producer value is
3261 * unchanged). For performance reasons we avoid checking for this
3262 * redundant doorbell.
3263 */
3264 wmb();
3265 writel(qp->sq.db_data.raw, qp->sq.db);
3266
3267 /* Make sure write sticks */
3268 mmiowb();
3269
3270 spin_unlock_irqrestore(&qp->q_lock, flags);
3271
3272 return rc;
3273}
3274
3275int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3276 struct ib_recv_wr **bad_wr)
3277{
3278 struct qedr_qp *qp = get_qedr_qp(ibqp);
3279 struct qedr_dev *dev = qp->dev;
3280 unsigned long flags;
3281 int status = 0;
3282
Ram Amrani04886772016-10-10 13:15:38 +03003283 if (qp->qp_type == IB_QPT_GSI)
3284 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3285
Ram Amraniafa0e132016-10-10 13:15:36 +03003286 spin_lock_irqsave(&qp->q_lock, flags);
3287
Amrani, Ram922d9a42016-12-22 14:40:38 +02003288 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003289 spin_unlock_irqrestore(&qp->q_lock, flags);
3290 *bad_wr = wr;
3291 return -EINVAL;
3292 }
3293
3294 while (wr) {
3295 int i;
3296
3297 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3298 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3299 wr->num_sge > qp->rq.max_sges) {
3300 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3301 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3302 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3303 qp->rq.max_sges);
3304 status = -ENOMEM;
3305 *bad_wr = wr;
3306 break;
3307 }
3308 for (i = 0; i < wr->num_sge; i++) {
3309 u32 flags = 0;
3310 struct rdma_rq_sge *rqe =
3311 qed_chain_produce(&qp->rq.pbl);
3312
3313 /* First one must include the number
3314 * of SGEs in the list
3315 */
3316 if (!i)
3317 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3318 wr->num_sge);
3319
3320 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3321 wr->sg_list[i].lkey);
3322
3323 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3324 wr->sg_list[i].length, flags);
3325 }
3326
3327 /* Special case of no SGEs: the FW requires between 1 and 4 SGEs,
3328 * so in this case we need to post one SGE with length zero. This is
3329 * because an RDMA write with immediate consumes an RQ entry.
3330 */
3331 if (!wr->num_sge) {
3332 u32 flags = 0;
3333 struct rdma_rq_sge *rqe =
3334 qed_chain_produce(&qp->rq.pbl);
3335
3336 /* First one must include the number
3337 * of SGEs in the list
3338 */
3339 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3340 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3341
3342 RQ_SGE_SET(rqe, 0, 0, flags);
3343 i = 1;
3344 }
3345
3346 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3347 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3348
3349 qedr_inc_sw_prod(&qp->rq);
3350
3351 /* Flush all the writes before signalling doorbell */
3352 wmb();
3353
3354 qp->rq.db_data.data.value++;
3355
3356 writel(qp->rq.db_data.raw, qp->rq.db);
3357
3358 /* Make sure write sticks */
3359 mmiowb();
3360
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003361 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3362 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3363 mmiowb(); /* for second doorbell */
3364 }
3365
Ram Amraniafa0e132016-10-10 13:15:36 +03003366 wr = wr->next;
3367 }
3368
3369 spin_unlock_irqrestore(&qp->q_lock, flags);
3370
3371 return status;
3372}
3373
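/* A CQE needs processing when its toggle bit matches the CQ's current PBL
 * toggle value.
 */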
3374static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3375{
3376 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3377
3378 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3379 cq->pbl_toggle;
3380}
3381
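/* The owning QP is recovered from the 64-bit qp_handle the FW stores in the
 * CQE, split into hi/lo 32-bit words.
 */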
3382static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3383{
3384 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3385 struct qedr_qp *qp;
3386
3387 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3388 resp_cqe->qp_handle.lo,
3389 u64);
3390 return qp;
3391}
3392
3393static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3394{
3395 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3396
3397 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3398}
3399
3400/* Return latest CQE (needs processing) */
3401static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3402{
3403 return cq->latest_cqe;
3404}
3405
3406/* For FMR we need to increase the FMR-completed counter used by the FMR
 3407 * algorithm that determines whether a PBL can be freed or not.
 3408 * This must be done whether the work request was signaled or not. For
 3409 * this purpose we call this function from the condition that checks if a WR
 3410 * should be skipped, to make sure we don't miss it (possibly this FMR
 3411 * operation was not signaled).
 3412 */
3413static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3414{
3415 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3416 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3417}
3418
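/* Walk the SQ from the SW consumer up to the HW consumer reported in the
 * CQE, generating one work completion per signaled WQE (or for every WQE
 * when 'force' is set) and consuming the corresponding PBL elements.
 */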
3419static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3420 struct qedr_cq *cq, int num_entries,
3421 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3422 int force)
3423{
3424 u16 cnt = 0;
3425
3426 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3427 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3428 qedr_chk_if_fmr(qp);
3429 /* skip WC */
3430 goto next_cqe;
3431 }
3432
3433 /* fill WC */
3434 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003435 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003436 wc->wc_flags = 0;
3437 wc->src_qp = qp->id;
3438 wc->qp = &qp->ibqp;
3439
3440 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3441 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3442
3443 switch (wc->opcode) {
3444 case IB_WC_RDMA_WRITE:
3445 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3446 break;
3447 case IB_WC_COMP_SWAP:
3448 case IB_WC_FETCH_ADD:
3449 wc->byte_len = 8;
3450 break;
3451 case IB_WC_REG_MR:
3452 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3453 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003454 case IB_WC_RDMA_READ:
3455 case IB_WC_SEND:
3456 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3457 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003458 default:
3459 break;
3460 }
3461
3462 num_entries--;
3463 wc++;
3464 cnt++;
3465next_cqe:
3466 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3467 qed_chain_consume(&qp->sq.pbl);
3468 qedr_inc_sw_cons(&qp->sq);
3469 }
3470
3471 return cnt;
3472}
3473
3474static int qedr_poll_cq_req(struct qedr_dev *dev,
3475 struct qedr_qp *qp, struct qedr_cq *cq,
3476 int num_entries, struct ib_wc *wc,
3477 struct rdma_cqe_requester *req)
3478{
3479 int cnt = 0;
3480
3481 switch (req->status) {
3482 case RDMA_CQE_REQ_STS_OK:
3483 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3484 IB_WC_SUCCESS, 0);
3485 break;
3486 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003487 if (qp->state != QED_ROCE_QP_STATE_ERR)
Kalderon, Michaldc728f72018-01-25 13:23:20 +02003488 DP_DEBUG(dev, QEDR_MSG_CQ,
3489 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3490 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003491 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003492 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003493 break;
3494 default:
 3495		/* process all WQEs before the consumer */
3496 qp->state = QED_ROCE_QP_STATE_ERR;
3497 cnt = process_req(dev, qp, cq, num_entries, wc,
3498 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3499 wc += cnt;
 3500		/* if we have an extra WC, fill it with the actual error info */
3501 if (cnt < num_entries) {
3502 enum ib_wc_status wc_status;
3503
3504 switch (req->status) {
3505 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3506 DP_ERR(dev,
3507 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3508 cq->icid, qp->icid);
3509 wc_status = IB_WC_BAD_RESP_ERR;
3510 break;
3511 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3512 DP_ERR(dev,
3513 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3514 cq->icid, qp->icid);
3515 wc_status = IB_WC_LOC_LEN_ERR;
3516 break;
3517 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3518 DP_ERR(dev,
3519 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3520 cq->icid, qp->icid);
3521 wc_status = IB_WC_LOC_QP_OP_ERR;
3522 break;
3523 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3524 DP_ERR(dev,
3525 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3526 cq->icid, qp->icid);
3527 wc_status = IB_WC_LOC_PROT_ERR;
3528 break;
3529 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3530 DP_ERR(dev,
3531 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3532 cq->icid, qp->icid);
3533 wc_status = IB_WC_MW_BIND_ERR;
3534 break;
3535 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3536 DP_ERR(dev,
3537 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3538 cq->icid, qp->icid);
3539 wc_status = IB_WC_REM_INV_REQ_ERR;
3540 break;
3541 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3542 DP_ERR(dev,
3543 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3544 cq->icid, qp->icid);
3545 wc_status = IB_WC_REM_ACCESS_ERR;
3546 break;
3547 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3548 DP_ERR(dev,
3549 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3550 cq->icid, qp->icid);
3551 wc_status = IB_WC_REM_OP_ERR;
3552 break;
3553 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3554 DP_ERR(dev,
3555 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3556 cq->icid, qp->icid);
3557 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3558 break;
3559 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3560 DP_ERR(dev,
3561 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3562 cq->icid, qp->icid);
3563 wc_status = IB_WC_RETRY_EXC_ERR;
3564 break;
3565 default:
3566 DP_ERR(dev,
3567 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3568 cq->icid, qp->icid);
3569 wc_status = IB_WC_GENERAL_ERR;
3570 }
3571 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3572 wc_status, 1);
3573 }
3574 }
3575
3576 return cnt;
3577}
3578
Amrani, Ramb6acd712017-04-27 13:35:35 +03003579static inline int qedr_cqe_resp_status_to_ib(u8 status)
3580{
3581 switch (status) {
3582 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3583 return IB_WC_LOC_ACCESS_ERR;
3584 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3585 return IB_WC_LOC_LEN_ERR;
3586 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3587 return IB_WC_LOC_QP_OP_ERR;
3588 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3589 return IB_WC_LOC_PROT_ERR;
3590 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3591 return IB_WC_MW_BIND_ERR;
3592 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3593 return IB_WC_REM_INV_RD_REQ_ERR;
3594 case RDMA_CQE_RESP_STS_OK:
3595 return IB_WC_SUCCESS;
3596 default:
3597 return IB_WC_GENERAL_ERR;
3598 }
3599}
3600
3601static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3602 struct ib_wc *wc)
3603{
3604 wc->status = IB_WC_SUCCESS;
3605 wc->byte_len = le32_to_cpu(resp->length);
3606
3607 if (resp->flags & QEDR_RESP_IMM) {
Jason Gunthorpe7bed7eb2018-01-11 14:43:07 -07003608 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
Amrani, Ramb6acd712017-04-27 13:35:35 +03003609 wc->wc_flags |= IB_WC_WITH_IMM;
3610
3611 if (resp->flags & QEDR_RESP_RDMA)
3612 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3613
3614 if (resp->flags & QEDR_RESP_INV)
3615 return -EINVAL;
3616
3617 } else if (resp->flags & QEDR_RESP_INV) {
 3618		wc->ex.invalidate_rkey = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3619 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3620
3621 if (resp->flags & QEDR_RESP_RDMA)
3622 return -EINVAL;
3623
3624 } else if (resp->flags & QEDR_RESP_RDMA) {
3625 return -EINVAL;
3626 }
3627
3628 return 0;
3629}
3630
Ram Amraniafa0e132016-10-10 13:15:36 +03003631static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3632 struct qedr_cq *cq, struct ib_wc *wc,
3633 struct rdma_cqe_responder *resp, u64 wr_id)
3634{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003635 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003636 wc->opcode = IB_WC_RECV;
3637 wc->wc_flags = 0;
3638
Amrani, Ramb6acd712017-04-27 13:35:35 +03003639 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3640 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3641 DP_ERR(dev,
3642 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3643 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003644
Amrani, Ramb6acd712017-04-27 13:35:35 +03003645 } else {
3646 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3647 if (wc->status == IB_WC_GENERAL_ERR)
3648 DP_ERR(dev,
3649 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3650 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003651 }
3652
Amrani, Ramb6acd712017-04-27 13:35:35 +03003653 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003654 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003655 wc->src_qp = qp->id;
3656 wc->qp = &qp->ibqp;
3657 wc->wr_id = wr_id;
3658}
3659
3660static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3661 struct qedr_cq *cq, struct ib_wc *wc,
3662 struct rdma_cqe_responder *resp)
3663{
3664 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3665
3666 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3667
3668 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3669 qed_chain_consume(&qp->rq.pbl);
3670 qedr_inc_sw_cons(&qp->rq);
3671
3672 return 1;
3673}
3674
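/* Generate flush-error completions for the RQEs still outstanding, up to the
 * HW consumer reported in the CQE.
 */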
3675static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3676 int num_entries, struct ib_wc *wc, u16 hw_cons)
3677{
3678 u16 cnt = 0;
3679
3680 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3681 /* fill WC */
3682 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003683 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003684 wc->wc_flags = 0;
3685 wc->src_qp = qp->id;
3686 wc->byte_len = 0;
3687 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3688 wc->qp = &qp->ibqp;
3689 num_entries--;
3690 wc++;
3691 cnt++;
3692 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3693 qed_chain_consume(&qp->rq.pbl);
3694 qedr_inc_sw_cons(&qp->rq);
3695 }
3696
3697 return cnt;
3698}
3699
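/* A flushed responder CQE covers all outstanding RQEs; consume it only once
 * the SW consumer has caught up with the rq_cons reported by the HW,
 * otherwise leave it for the next poll.
 */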
3700static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3701 struct rdma_cqe_responder *resp, int *update)
3702{
3703 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3704 consume_cqe(cq);
3705 *update |= 1;
3706 }
3707}
3708
3709static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3710 struct qedr_cq *cq, int num_entries,
3711 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3712 int *update)
3713{
3714 int cnt;
3715
3716 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3717 cnt = process_resp_flush(qp, cq, num_entries, wc,
3718 resp->rq_cons);
3719 try_consume_resp_cqe(cq, qp, resp, update);
3720 } else {
3721 cnt = process_resp_one(dev, qp, cq, wc, resp);
3722 consume_cqe(cq);
3723 *update |= 1;
3724 }
3725
3726 return cnt;
3727}
3728
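/* Likewise for requester CQEs: a single CQE may complete several SQ WQEs, so
 * it is consumed only once the SW consumer matches the reported sq_cons.
 */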
3729static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3730 struct rdma_cqe_requester *req, int *update)
3731{
3732 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3733 consume_cqe(cq);
3734 *update |= 1;
3735 }
3736}
3737
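/* Poll loop: iterate over the valid CQEs, dispatch requester/responder
 * handling per CQE type, then advance the CQ consumer and, if any CQE was
 * consumed, update the CQ doorbell to point at the latest valid entry.
 */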
3738int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3739{
3740 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3741 struct qedr_cq *cq = get_qedr_cq(ibcq);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003742 union rdma_cqe *cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003743 u32 old_cons, new_cons;
3744 unsigned long flags;
3745 int update = 0;
3746 int done = 0;
3747
Amrani, Ram4dd72632017-04-27 13:35:34 +03003748 if (cq->destroyed) {
3749 DP_ERR(dev,
3750 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3751 cq, cq->icid);
3752 return 0;
3753 }
3754
Ram Amrani04886772016-10-10 13:15:38 +03003755 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3756 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3757
Ram Amraniafa0e132016-10-10 13:15:36 +03003758 spin_lock_irqsave(&cq->cq_lock, flags);
Kalderon, Michale3fd1122018-03-05 10:50:10 +02003759 cqe = cq->latest_cqe;
Ram Amraniafa0e132016-10-10 13:15:36 +03003760 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3761 while (num_entries && is_valid_cqe(cq, cqe)) {
3762 struct qedr_qp *qp;
3763 int cnt = 0;
3764
3765 /* prevent speculative reads of any field of CQE */
3766 rmb();
3767
3768 qp = cqe_get_qp(cqe);
3769 if (!qp) {
3770 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3771 break;
3772 }
3773
3774 wc->qp = &qp->ibqp;
3775
3776 switch (cqe_get_type(cqe)) {
3777 case RDMA_CQE_TYPE_REQUESTER:
3778 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3779 &cqe->req);
3780 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3781 break;
3782 case RDMA_CQE_TYPE_RESPONDER_RQ:
3783 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3784 &cqe->resp, &update);
3785 break;
3786 case RDMA_CQE_TYPE_INVALID:
3787 default:
3788 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3789 cqe_get_type(cqe));
3790 }
3791 num_entries -= cnt;
3792 wc += cnt;
3793 done += cnt;
3794
3795 cqe = get_cqe(cq);
3796 }
3797 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3798
3799 cq->cq_cons += new_cons - old_cons;
3800
3801 if (update)
 3802		/* the doorbell notifies about the latest VALID entry,
 3803		 * but the chain already points to the next INVALID one
3804 */
3805 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3806
3807 spin_unlock_irqrestore(&cq->cq_lock, flags);
3808 return done;
3809}
Ram Amrani993d1b52016-10-10 13:15:39 +03003810
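/* No real MAD processing is done here; the incoming MAD header is only
 * logged for debug and IB_MAD_RESULT_SUCCESS is reported back to the MAD
 * layer.
 */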
3811int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3812 u8 port_num,
3813 const struct ib_wc *in_wc,
3814 const struct ib_grh *in_grh,
3815 const struct ib_mad_hdr *mad_hdr,
3816 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3817 size_t *out_mad_size, u16 *out_mad_pkey_index)
3818{
3819 struct qedr_dev *dev = get_qedr_dev(ibdev);
3820
3821 DP_DEBUG(dev, QEDR_MSG_GSI,
3822 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3823 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3824 mad_hdr->class_specific, mad_hdr->class_version,
3825 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3826 return IB_MAD_RESULT_SUCCESS;
3827}