/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

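/* Copy at most udata->outlen bytes of the response so that user libraries
 * built against an older (smaller) response struct keep working.
 */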
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

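/* RoCE exposes a single default P_Key; every valid index reports it. */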
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

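/* For iWARP the single GID entry is derived from the netdev MAC address. */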
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

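/* IB speed encodings (mirroring the IB_SPEED_* values) used below when
 * mapping the Ethernet link speed to an IB speed/width pair.
 */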
#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr is zeroed by the caller; avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->gid_tbl_len = 1;
		attr->pkey_tbl_len = 1;
	} else {
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

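/* Track the doorbell regions handed out to this user context so that
 * qedr_mmap() can later validate the requested offset and length.
 */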
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.dpm_enabled = dev->user_dpm_enabled;
	uresp.wids_enabled = 1;
	uresp.wid_count = oparams.wid_count;
	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

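/* Map a previously registered region into user space. Doorbell pages are
 * mapped write-combined and may not be mapped for read; other registered
 * offsets fall back to a plain remap_pfn_range().
 */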
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		goto err;

	pd->pd_id = pd_id;

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		uresp.pd_id = pd_id;

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			goto err;
		}

		pd->uctx = get_qedr_ucontext(context);
		pd->uctx->pd = pd;
	}

	return &pd->ibpd;

err:
	kfree(pd);
	return ERR_PTR(rc);
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

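/* Each PBL page holds page_size / sizeof(u64) PBEs: 512 for the minimum 4K
 * page and 8192 for the maximum 64K page. A two-layer PBL can therefore
 * address up to 8192 * 8192 PBEs (MAX_PBES_TWO_LAYER).
 */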
#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}

static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 (points to the PBLs) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

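/* Write the DMA addresses of the umem pages into the PBL as firmware-sized
 * PBEs; when the umem page size exceeds the FW page size (pg_shift), each
 * umem page is split into several consecutive PBEs.
 */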
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	u32 fw_pg_cnt, fw_pg_per_umem_pg;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	u64 pg_addr;
	int entry;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lies in the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = umem->page_shift;

	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
				pbe->lo = cpu_to_le32(pg_addr);
				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

				pg_addr += BIT(pg_shift);
				pbe_cnt++;
				total_num_pbes++;
				pbe++;

				if (total_num_pbes == pbl_info->num_pbes)
					return;

				/* If the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
				    (pbl_info->pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct regpair *)pbl_tbl->va;
					pbe_cnt = 0;
				}

				fw_pg_cnt++;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

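/* Advance the CQ consumer; crossing the last element flips the expected
 * toggle bit for the next pass over the ring.
 */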
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_page_count(q->umem) <<
		   (q->umem->page_shift - FW_PAGE_SHIFT);

	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	return 0;

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE,
					  1, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl, NULL);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

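/* Polling budget used while draining CNQ notifications on CQ destroy:
 * roughly 10 busy-wait iterations of 10us, then 10 sleeps of 10ms.
 */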
#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		goto done;

	iparams.icid = cq->icid;
	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		return rc;

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	if (oparams.num_cq_notif != cq->cnq_notif)
		goto err;

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
done:
	cq->sig = ~cq->sig;

	kfree(cq);

	return 0;

err:
	DP_ERR(dev,
	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);

	return -EINVAL;
}

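/* Resolve the SGID entry referenced by the AH attribute and fill in the
 * modify-QP addressing accordingly: RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6,
 * plus the VLAN taken from the GID's netdev.
 */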
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device,
			       rdma_ah_get_port_num(&attr->ah_attr),
			       grh->sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &grh->dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(grh->dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	/* iWARP requires two doorbells per RQ. */
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	} else {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	}

	uresp->rq_icid = qp->icid;
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

	/* iWARP uses the same cid for rq and sq */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp->sq_icid = qp->icid;
	else
		uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(dev, &uresp, qp);
	qedr_copy_rq_uresp(dev, &uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	atomic_set(&qp->refcnt, 1);
	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;
	qp->rq.max_sges = attrs->cap.max_recv_sge;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->stats_queue = 0;
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
	params->srq_id = 0;
	params->use_srq = false;
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qp->usq.buf_addr,
		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}

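/* iWARP keeps QPs in an idr keyed by the QP id so that later lookups (e.g.
 * from CM events) can map an id back to the qedr_qp; RoCE does not need the
 * lookup, so these helpers are no-ops there.
 */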
1382
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001383static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
1384{
1385 int rc;
1386
1387 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1388 return 0;
1389
1390 idr_preload(GFP_KERNEL);
1391 spin_lock_irq(&dev->idr_lock);
1392
1393 rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
1394
1395 spin_unlock_irq(&dev->idr_lock);
1396 idr_preload_end();
1397
1398 return rc < 0 ? rc : 0;
1399}
1400
1401static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
1402{
1403 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1404 return;
1405
1406 spin_lock_irq(&dev->idr_lock);
1407 idr_remove(&dev->qpidr, id);
1408 spin_unlock_irq(&dev->idr_lock);
1409}
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001410
1411static inline void
1412qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1413 struct qedr_qp *qp,
1414 struct qed_rdma_create_qp_out_params *out_params)
1415{
1416 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1417 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1418
1419 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1420 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1421
1422 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1423 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1424
1425 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1426 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1427}
1428
Amrani, Ramdf158562016-12-22 14:52:24 +02001429static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1430{
1431 if (qp->usq.umem)
1432 ib_umem_release(qp->usq.umem);
1433 qp->usq.umem = NULL;
1434
1435 if (qp->urq.umem)
1436 ib_umem_release(qp->urq.umem);
1437 qp->urq.umem = NULL;
1438}
1439
1440static int qedr_create_user_qp(struct qedr_dev *dev,
1441 struct qedr_qp *qp,
1442 struct ib_pd *ibpd,
1443 struct ib_udata *udata,
1444 struct ib_qp_init_attr *attrs)
1445{
1446 struct qed_rdma_create_qp_in_params in_params;
1447 struct qed_rdma_create_qp_out_params out_params;
1448 struct qedr_pd *pd = get_qedr_pd(ibpd);
1449 struct ib_ucontext *ib_ctx = NULL;
1450 struct qedr_ucontext *ctx = NULL;
1451 struct qedr_create_qp_ureq ureq;
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001452 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
Amrani, Ramdf158562016-12-22 14:52:24 +02001453 int rc = -EINVAL;
1454
1455 ib_ctx = ibpd->uobject->context;
1456 ctx = get_qedr_ucontext(ib_ctx);
1457
1458 memset(&ureq, 0, sizeof(ureq));
1459 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1460 if (rc) {
1461 DP_ERR(dev, "Problem copying data from user space\n");
1462 return rc;
1463 }
1464
1465 /* SQ - read access only (0), dma sync not required (0) */
1466 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001467 ureq.sq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001468 if (rc)
1469 return rc;
1470
1471 /* RQ - read access only (0), dma sync not required (0) */
1472 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001473 ureq.rq_len, 0, 0, alloc_and_init);
Amrani, Ramdf158562016-12-22 14:52:24 +02001474 if (rc)
1475 return rc;
1476
1477 memset(&in_params, 0, sizeof(in_params));
1478 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1479 in_params.qp_handle_lo = ureq.qp_handle_lo;
1480 in_params.qp_handle_hi = ureq.qp_handle_hi;
1481 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1482 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1483 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1484 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1485
1486 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1487 &in_params, &out_params);
1488
1489 if (!qp->qed_qp) {
1490 rc = -ENOMEM;
1491 goto err1;
1492 }
1493
Kalderon, Michal69ad0e72017-07-26 14:41:57 +03001494 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1495 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1496
Amrani, Ramdf158562016-12-22 14:52:24 +02001497 qp->qp_id = out_params.qp_id;
1498 qp->icid = out_params.icid;
1499
1500 rc = qedr_copy_qp_uresp(dev, qp, udata);
1501 if (rc)
1502 goto err;
1503
1504 qedr_qp_user_print(dev, qp);
1505
1506 return 0;
1507err:
1508 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1509 if (rc)
1510 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1511
1512err1:
1513 qedr_cleanup_user(dev, qp);
1514 return rc;
1515}
1516
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001517static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1518{
1519 qp->sq.db = dev->db_addr +
1520 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1521 qp->sq.db_data.data.icid = qp->icid;
1522
1523 qp->rq.db = dev->db_addr +
1524 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1525 qp->rq.db_data.data.icid = qp->icid;
1526 qp->rq.iwarp_db2 = dev->db_addr +
1527 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1528 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1529 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1530}
1531
Amrani, Ramdf158562016-12-22 14:52:24 +02001532static int
1533qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1534 struct qedr_qp *qp,
1535 struct qed_rdma_create_qp_in_params *in_params,
1536 u32 n_sq_elems, u32 n_rq_elems)
1537{
1538 struct qed_rdma_create_qp_out_params out_params;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001539 int rc;
1540
Ram Amranicecbcdd2016-10-10 13:15:34 +03001541 rc = dev->ops->common->chain_alloc(dev->cdev,
1542 QED_CHAIN_USE_TO_PRODUCE,
1543 QED_CHAIN_MODE_PBL,
1544 QED_CHAIN_CNT_TYPE_U32,
1545 n_sq_elems,
1546 QEDR_SQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001547 &qp->sq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001548
1549 if (rc)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001550 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001551
Amrani, Ramdf158562016-12-22 14:52:24 +02001552 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1553 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001554
Ram Amranicecbcdd2016-10-10 13:15:34 +03001555 rc = dev->ops->common->chain_alloc(dev->cdev,
1556 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1557 QED_CHAIN_MODE_PBL,
1558 QED_CHAIN_CNT_TYPE_U32,
1559 n_rq_elems,
1560 QEDR_RQE_ELEMENT_SIZE,
Mintz, Yuval1a4a6972017-06-20 16:00:00 +03001561 &qp->rq.pbl, NULL);
Amrani, Ramdf158562016-12-22 14:52:24 +02001562 if (rc)
1563 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001564
Amrani, Ramdf158562016-12-22 14:52:24 +02001565 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1566 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001567
Amrani, Ramdf158562016-12-22 14:52:24 +02001568 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1569 in_params, &out_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001570
Amrani, Ramdf158562016-12-22 14:52:24 +02001571 if (!qp->qed_qp)
1572 return -EINVAL;
1573
1574 qp->qp_id = out_params.qp_id;
1575 qp->icid = out_params.icid;
1576
1577 qedr_set_roce_db_info(dev, qp);
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001578 return rc;
1579}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001580
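/* For iWARP, qed allocates the queue PBLs itself, so the QP is created first
 * and the SQ/RQ chains are then built on top of the externally provided PBLs
 * (ext_pbl) returned in out_params.
 */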
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001581static int
1582qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1583 struct qedr_qp *qp,
1584 struct qed_rdma_create_qp_in_params *in_params,
1585 u32 n_sq_elems, u32 n_rq_elems)
1586{
1587 struct qed_rdma_create_qp_out_params out_params;
1588 struct qed_chain_ext_pbl ext_pbl;
1589 int rc;
1590
1591 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1592 QEDR_SQE_ELEMENT_SIZE,
1593 QED_CHAIN_MODE_PBL);
1594 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1595 QEDR_RQE_ELEMENT_SIZE,
1596 QED_CHAIN_MODE_PBL);
1597
1598 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1599 in_params, &out_params);
1600
1601 if (!qp->qed_qp)
1602 return -EINVAL;
1603
1604 /* Now we allocate the chain */
1605 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1606 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1607
1608 rc = dev->ops->common->chain_alloc(dev->cdev,
1609 QED_CHAIN_USE_TO_PRODUCE,
1610 QED_CHAIN_MODE_PBL,
1611 QED_CHAIN_CNT_TYPE_U32,
1612 n_sq_elems,
1613 QEDR_SQE_ELEMENT_SIZE,
1614 &qp->sq.pbl, &ext_pbl);
1615
1616 if (rc)
1617 goto err;
1618
1619 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1620 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1621
1622 rc = dev->ops->common->chain_alloc(dev->cdev,
1623 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1624 QED_CHAIN_MODE_PBL,
1625 QED_CHAIN_CNT_TYPE_U32,
1626 n_rq_elems,
1627 QEDR_RQE_ELEMENT_SIZE,
1628 &qp->rq.pbl, &ext_pbl);
1629
1630 if (rc)
1631 goto err;
1632
1633 qp->qp_id = out_params.qp_id;
1634 qp->icid = out_params.icid;
1635
1636 qedr_set_iwarp_db_info(dev, qp);
1637 return rc;
1638
1639err:
1640 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1641
1642 return rc;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001643}
1644
Amrani, Ramdf158562016-12-22 14:52:24 +02001645static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001646{
Amrani, Ramdf158562016-12-22 14:52:24 +02001647 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1648 kfree(qp->wqe_wr_id);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001649
Amrani, Ramdf158562016-12-22 14:52:24 +02001650 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1651 kfree(qp->rqe_wr_id);
1652}
Ram Amranicecbcdd2016-10-10 13:15:34 +03001653
Amrani, Ramdf158562016-12-22 14:52:24 +02001654static int qedr_create_kernel_qp(struct qedr_dev *dev,
1655 struct qedr_qp *qp,
1656 struct ib_pd *ibpd,
1657 struct ib_qp_init_attr *attrs)
1658{
1659 struct qed_rdma_create_qp_in_params in_params;
1660 struct qedr_pd *pd = get_qedr_pd(ibpd);
1661 int rc = -EINVAL;
1662 u32 n_rq_elems;
1663 u32 n_sq_elems;
1664 u32 n_sq_entries;
1665
1666 memset(&in_params, 0, sizeof(in_params));
1667
1668 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1669 * the ring. The ring should allow at least a single WR, even if the
1670 * user requested none, due to allocation issues.
1671 * We should add an extra WR since the prod and cons indices of
1672 * wqe_wr_id are managed in such a way that the WQ is considered full
1673 * when (prod+1)%max_wr==cons. We currently don't do that because we
1674	 * double the number of entries due to an iSER issue that pushes far more
1675	 * WRs than indicated. If we decline its ib_post_send() then we get
1676	 * error prints in dmesg that we'd like to avoid.
1677 */
1678 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1679 dev->attr.max_sqe);
1680
1681 qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
1682 GFP_KERNEL);
1683 if (!qp->wqe_wr_id) {
1684 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1685 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001686 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001687
Amrani, Ramdf158562016-12-22 14:52:24 +02001688 /* QP handle to be written in CQE */
1689 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1690 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001691
Amrani, Ramdf158562016-12-22 14:52:24 +02001692 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1693	 * the ring. The ring should allow at least a single WR, even if the
1694 * user requested none, due to allocation issues.
1695 */
1696 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1697
1698 /* Allocate driver internal RQ array */
1699 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
1700 GFP_KERNEL);
1701 if (!qp->rqe_wr_id) {
1702 DP_ERR(dev,
1703 "create qp: failed RQ shadow memory allocation\n");
1704 kfree(qp->wqe_wr_id);
1705 return -ENOMEM;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001706 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001707
Amrani, Ramdf158562016-12-22 14:52:24 +02001708 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001709
Amrani, Ramdf158562016-12-22 14:52:24 +02001710 n_sq_entries = attrs->cap.max_send_wr;
1711 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1712 n_sq_entries = max_t(u32, n_sq_entries, 1);
1713 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001714
Amrani, Ramdf158562016-12-22 14:52:24 +02001715 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1716
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001717 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1718 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1719 n_sq_elems, n_rq_elems);
1720 else
1721 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1722 n_sq_elems, n_rq_elems);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001723 if (rc)
Amrani, Ramdf158562016-12-22 14:52:24 +02001724 qedr_cleanup_kernel(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001725
1726 return rc;
1727}
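
/*
 * Editor's note, illustrative only: for a caller asking for, e.g.,
 * max_send_wr = 128 and max_recv_wr = 256 (both within device limits), the
 * sizing above yields a send shadow array of 128 * dev->wq_multiplier
 * entries, n_sq_elems = 128 * QEDR_MAX_SQE_ELEMENTS_PER_SQE chain elements
 * and n_rq_elems = 256 * QEDR_MAX_RQE_ELEMENTS_PER_RQE, so every WR can
 * occupy its worst-case number of ring elements.
 */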
1728
1729struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1730 struct ib_qp_init_attr *attrs,
1731 struct ib_udata *udata)
1732{
1733 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001734 struct qedr_pd *pd = get_qedr_pd(ibpd);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001735 struct qedr_qp *qp;
Wei Yongjun181d8012016-10-28 16:33:47 +00001736 struct ib_qp *ibqp;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001737 int rc = 0;
1738
1739 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1740 udata ? "user library" : "kernel", pd);
1741
1742 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1743 if (rc)
1744 return ERR_PTR(rc);
1745
Wei Yongjun181d8012016-10-28 16:33:47 +00001746 if (attrs->srq)
1747 return ERR_PTR(-EINVAL);
1748
Ram Amranicecbcdd2016-10-10 13:15:34 +03001749 DP_DEBUG(dev, QEDR_MSG_QP,
Amrani, Ramdf158562016-12-22 14:52:24 +02001750		 "create qp: called from %s, event_handler=%p, pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1751 udata ? "user library" : "kernel", attrs->event_handler, pd,
Ram Amranicecbcdd2016-10-10 13:15:34 +03001752 get_qedr_cq(attrs->send_cq),
1753 get_qedr_cq(attrs->send_cq)->icid,
1754 get_qedr_cq(attrs->recv_cq),
1755 get_qedr_cq(attrs->recv_cq)->icid);
1756
Amrani, Ramdf158562016-12-22 14:52:24 +02001757 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1758 if (!qp) {
1759 DP_ERR(dev, "create qp: failed allocating memory\n");
1760 return ERR_PTR(-ENOMEM);
1761 }
1762
1763 qedr_set_common_qp_params(dev, qp, pd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001764
Ram Amrani04886772016-10-10 13:15:38 +03001765 if (attrs->qp_type == IB_QPT_GSI) {
Wei Yongjun181d8012016-10-28 16:33:47 +00001766 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1767 if (IS_ERR(ibqp))
1768 kfree(qp);
1769 return ibqp;
Ram Amrani04886772016-10-10 13:15:38 +03001770 }
1771
Amrani, Ramdf158562016-12-22 14:52:24 +02001772 if (udata)
1773 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1774 else
1775 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001776
Amrani, Ramdf158562016-12-22 14:52:24 +02001777 if (rc)
1778 goto err;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001779
Ram Amranicecbcdd2016-10-10 13:15:34 +03001780 qp->ibqp.qp_num = qp->qp_id;
1781
Kalderon, Michalde0089e2017-07-26 14:41:55 +03001782 rc = qedr_idr_add(dev, qp, qp->qp_id);
1783 if (rc)
1784 goto err;
1785
Ram Amranicecbcdd2016-10-10 13:15:34 +03001786 return &qp->ibqp;
1787
Amrani, Ramdf158562016-12-22 14:52:24 +02001788err:
Ram Amranicecbcdd2016-10-10 13:15:34 +03001789 kfree(qp);
1790
1791 return ERR_PTR(-EFAULT);
1792}
1793
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001794static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001795{
1796 switch (qp_state) {
1797 case QED_ROCE_QP_STATE_RESET:
1798 return IB_QPS_RESET;
1799 case QED_ROCE_QP_STATE_INIT:
1800 return IB_QPS_INIT;
1801 case QED_ROCE_QP_STATE_RTR:
1802 return IB_QPS_RTR;
1803 case QED_ROCE_QP_STATE_RTS:
1804 return IB_QPS_RTS;
1805 case QED_ROCE_QP_STATE_SQD:
1806 return IB_QPS_SQD;
1807 case QED_ROCE_QP_STATE_ERR:
1808 return IB_QPS_ERR;
1809 case QED_ROCE_QP_STATE_SQE:
1810 return IB_QPS_SQE;
1811 }
1812 return IB_QPS_ERR;
1813}
1814
Ram Amrani27a4b1a2017-01-24 13:51:39 +02001815static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1816 enum ib_qp_state qp_state)
Ram Amranicecbcdd2016-10-10 13:15:34 +03001817{
1818 switch (qp_state) {
1819 case IB_QPS_RESET:
1820 return QED_ROCE_QP_STATE_RESET;
1821 case IB_QPS_INIT:
1822 return QED_ROCE_QP_STATE_INIT;
1823 case IB_QPS_RTR:
1824 return QED_ROCE_QP_STATE_RTR;
1825 case IB_QPS_RTS:
1826 return QED_ROCE_QP_STATE_RTS;
1827 case IB_QPS_SQD:
1828 return QED_ROCE_QP_STATE_SQD;
1829 case IB_QPS_ERR:
1830 return QED_ROCE_QP_STATE_ERR;
1831 default:
1832 return QED_ROCE_QP_STATE_ERR;
1833 }
1834}
1835
1836static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1837{
1838 qed_chain_reset(&qph->pbl);
1839 qph->prod = 0;
1840 qph->cons = 0;
1841 qph->wqe_cons = 0;
1842 qph->db_data.data.value = cpu_to_le16(0);
1843}
1844
1845static int qedr_update_qp_state(struct qedr_dev *dev,
1846 struct qedr_qp *qp,
1847 enum qed_roce_qp_state new_state)
1848{
1849 int status = 0;
1850
1851 if (new_state == qp->state)
Ram Amrani865cea42017-01-24 13:50:34 +02001852 return 0;
Ram Amranicecbcdd2016-10-10 13:15:34 +03001853
1854 switch (qp->state) {
1855 case QED_ROCE_QP_STATE_RESET:
1856 switch (new_state) {
1857 case QED_ROCE_QP_STATE_INIT:
1858 qp->prev_wqe_size = 0;
1859 qedr_reset_qp_hwq_info(&qp->sq);
1860 qedr_reset_qp_hwq_info(&qp->rq);
1861 break;
1862 default:
1863 status = -EINVAL;
1864 break;
1865		}
1866 break;
1867 case QED_ROCE_QP_STATE_INIT:
1868 switch (new_state) {
1869 case QED_ROCE_QP_STATE_RTR:
1870 /* Update doorbell (in case post_recv was
1871 * done before move to RTR)
1872 */
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001873
1874 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1875 wmb();
1876 writel(qp->rq.db_data.raw, qp->rq.db);
1877 /* Make sure write takes effect */
1878 mmiowb();
1879 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001880 break;
1881 case QED_ROCE_QP_STATE_ERR:
1882 break;
1883 default:
1884 /* Invalid state change. */
1885 status = -EINVAL;
1886 break;
1887		}
1888 break;
1889 case QED_ROCE_QP_STATE_RTR:
1890 /* RTR->XXX */
1891 switch (new_state) {
1892 case QED_ROCE_QP_STATE_RTS:
1893 break;
1894 case QED_ROCE_QP_STATE_ERR:
1895 break;
1896 default:
1897 /* Invalid state change. */
1898 status = -EINVAL;
1899 break;
1900		}
1901 break;
1902 case QED_ROCE_QP_STATE_RTS:
1903 /* RTS->XXX */
1904 switch (new_state) {
1905 case QED_ROCE_QP_STATE_SQD:
1906 break;
1907 case QED_ROCE_QP_STATE_ERR:
1908 break;
1909 default:
1910 /* Invalid state change. */
1911 status = -EINVAL;
1912 break;
1913		}
1914 break;
1915 case QED_ROCE_QP_STATE_SQD:
1916 /* SQD->XXX */
1917 switch (new_state) {
1918 case QED_ROCE_QP_STATE_RTS:
1919 case QED_ROCE_QP_STATE_ERR:
1920 break;
1921 default:
1922 /* Invalid state change. */
1923 status = -EINVAL;
1924 break;
1925		}
1926 break;
1927 case QED_ROCE_QP_STATE_ERR:
1928 /* ERR->XXX */
1929 switch (new_state) {
1930 case QED_ROCE_QP_STATE_RESET:
Ram Amrani933e6dc2017-01-24 13:50:38 +02001931 if ((qp->rq.prod != qp->rq.cons) ||
1932 (qp->sq.prod != qp->sq.cons)) {
1933 DP_NOTICE(dev,
1934 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1935 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1936 qp->sq.cons);
1937 status = -EINVAL;
1938 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001939 break;
1940 default:
1941 status = -EINVAL;
1942 break;
1943		}
1944 break;
1945 default:
1946 status = -EINVAL;
1947 break;
1948	}
1949
1950 return status;
1951}
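
/*
 * Editor's sketch, not driver code: the nested switches above encode a
 * small transition table.  A hypothetical table-driven equivalent (assuming
 * QED_ROCE_QP_STATE_SQE is the highest enumerator, and ignoring the
 * same-state early return and the extra queue-emptiness check on
 * ERR->RESET) could look like:
 */
static const u8 example_valid_transition[QED_ROCE_QP_STATE_SQE + 1]
					[QED_ROCE_QP_STATE_SQE + 1] = {
	[QED_ROCE_QP_STATE_RESET] = { [QED_ROCE_QP_STATE_INIT]  = 1 },
	[QED_ROCE_QP_STATE_INIT]  = { [QED_ROCE_QP_STATE_RTR]   = 1,
				      [QED_ROCE_QP_STATE_ERR]   = 1 },
	[QED_ROCE_QP_STATE_RTR]   = { [QED_ROCE_QP_STATE_RTS]   = 1,
				      [QED_ROCE_QP_STATE_ERR]   = 1 },
	[QED_ROCE_QP_STATE_RTS]   = { [QED_ROCE_QP_STATE_SQD]   = 1,
				      [QED_ROCE_QP_STATE_ERR]   = 1 },
	[QED_ROCE_QP_STATE_SQD]   = { [QED_ROCE_QP_STATE_RTS]   = 1,
				      [QED_ROCE_QP_STATE_ERR]   = 1 },
	[QED_ROCE_QP_STATE_ERR]   = { [QED_ROCE_QP_STATE_RESET] = 1 },
};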
1952
1953int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1954 int attr_mask, struct ib_udata *udata)
1955{
1956 struct qedr_qp *qp = get_qedr_qp(ibqp);
1957 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1958 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001959 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03001960 enum ib_qp_state old_qp_state, new_qp_state;
1961 int rc = 0;
1962
1963 DP_DEBUG(dev, QEDR_MSG_QP,
1964 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1965 attr->qp_state);
1966
1967 old_qp_state = qedr_get_ibqp_state(qp->state);
1968 if (attr_mask & IB_QP_STATE)
1969 new_qp_state = attr->qp_state;
1970 else
1971 new_qp_state = old_qp_state;
1972
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03001973 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1974 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1975 ibqp->qp_type, attr_mask,
1976 IB_LINK_LAYER_ETHERNET)) {
1977 DP_ERR(dev,
1978 "modify qp: invalid attribute mask=0x%x specified for\n"
1979 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1980 attr_mask, qp->qp_id, ibqp->qp_type,
1981 old_qp_state, new_qp_state);
1982 rc = -EINVAL;
1983 goto err;
1984 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03001985 }
1986
1987 /* Translate the masks... */
1988 if (attr_mask & IB_QP_STATE) {
1989 SET_FIELD(qp_params.modify_flags,
1990 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1991 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1992 }
1993
1994 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1995 qp_params.sqd_async = true;
1996
1997 if (attr_mask & IB_QP_PKEY_INDEX) {
1998 SET_FIELD(qp_params.modify_flags,
1999 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2000 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2001 rc = -EINVAL;
2002 goto err;
2003 }
2004
2005 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2006 }
2007
2008 if (attr_mask & IB_QP_QKEY)
2009 qp->qkey = attr->qkey;
2010
2011 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2012 SET_FIELD(qp_params.modify_flags,
2013 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2014 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2015 IB_ACCESS_REMOTE_READ;
2016 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2017 IB_ACCESS_REMOTE_WRITE;
2018 qp_params.incoming_atomic_en = attr->qp_access_flags &
2019 IB_ACCESS_REMOTE_ATOMIC;
2020 }
2021
2022 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2023 if (attr_mask & IB_QP_PATH_MTU) {
2024 if (attr->path_mtu < IB_MTU_256 ||
2025 attr->path_mtu > IB_MTU_4096) {
2026 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2027 rc = -EINVAL;
2028 goto err;
2029 }
2030 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2031 ib_mtu_enum_to_int(iboe_get_mtu
2032 (dev->ndev->mtu)));
2033 }
2034
2035 if (!qp->mtu) {
2036 qp->mtu =
2037 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2038 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2039 }
2040
2041 SET_FIELD(qp_params.modify_flags,
2042 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2043
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002044 qp_params.traffic_class_tos = grh->traffic_class;
2045 qp_params.flow_label = grh->flow_label;
2046 qp_params.hop_limit_ttl = grh->hop_limit;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002047
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002048 qp->sgid_idx = grh->sgid_index;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002049
2050 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2051 if (rc) {
2052 DP_ERR(dev,
2053 "modify qp: problems with GID index %d (rc=%d)\n",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002054 grh->sgid_index, rc);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002055 return rc;
2056 }
2057
2058 rc = qedr_get_dmac(dev, &attr->ah_attr,
2059 qp_params.remote_mac_addr);
2060 if (rc)
2061 return rc;
2062
2063 qp_params.use_local_mac = true;
2064 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2065
2066 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2067 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2068 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2069 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2070 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2071 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2072 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2073 qp_params.remote_mac_addr);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002074
2075 qp_params.mtu = qp->mtu;
2076 qp_params.lb_indication = false;
2077 }
2078
2079 if (!qp_params.mtu) {
2080 /* Stay with current MTU */
2081 if (qp->mtu)
2082 qp_params.mtu = qp->mtu;
2083 else
2084 qp_params.mtu =
2085 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2086 }
2087
2088 if (attr_mask & IB_QP_TIMEOUT) {
2089 SET_FIELD(qp_params.modify_flags,
2090 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2091
2092 qp_params.ack_timeout = attr->timeout;
2093 if (attr->timeout) {
2094 u32 temp;
2095
2096 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
2097 /* FW requires [msec] */
2098 qp_params.ack_timeout = temp;
2099 } else {
2100 /* Infinite */
2101 qp_params.ack_timeout = 0;
2102 }
2103 }
2104 if (attr_mask & IB_QP_RETRY_CNT) {
2105 SET_FIELD(qp_params.modify_flags,
2106 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2107 qp_params.retry_cnt = attr->retry_cnt;
2108 }
2109
2110 if (attr_mask & IB_QP_RNR_RETRY) {
2111 SET_FIELD(qp_params.modify_flags,
2112 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2113 qp_params.rnr_retry_cnt = attr->rnr_retry;
2114 }
2115
2116 if (attr_mask & IB_QP_RQ_PSN) {
2117 SET_FIELD(qp_params.modify_flags,
2118 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2119 qp_params.rq_psn = attr->rq_psn;
2120 qp->rq_psn = attr->rq_psn;
2121 }
2122
2123 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2124 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2125 rc = -EINVAL;
2126 DP_ERR(dev,
2127 "unsupported max_rd_atomic=%d, supported=%d\n",
2128 attr->max_rd_atomic,
2129 dev->attr.max_qp_req_rd_atomic_resc);
2130 goto err;
2131 }
2132
2133 SET_FIELD(qp_params.modify_flags,
2134 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2135 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2136 }
2137
2138 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2139 SET_FIELD(qp_params.modify_flags,
2140 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2141 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2142 }
2143
2144 if (attr_mask & IB_QP_SQ_PSN) {
2145 SET_FIELD(qp_params.modify_flags,
2146 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2147 qp_params.sq_psn = attr->sq_psn;
2148 qp->sq_psn = attr->sq_psn;
2149 }
2150
2151 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2152 if (attr->max_dest_rd_atomic >
2153 dev->attr.max_qp_resp_rd_atomic_resc) {
2154 DP_ERR(dev,
2155 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2156 attr->max_dest_rd_atomic,
2157 dev->attr.max_qp_resp_rd_atomic_resc);
2158
2159 rc = -EINVAL;
2160 goto err;
2161 }
2162
2163 SET_FIELD(qp_params.modify_flags,
2164 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2165 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2166 }
2167
2168 if (attr_mask & IB_QP_DEST_QPN) {
2169 SET_FIELD(qp_params.modify_flags,
2170 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2171
2172 qp_params.dest_qp = attr->dest_qp_num;
2173 qp->dest_qp_num = attr->dest_qp_num;
2174 }
2175
2176 if (qp->qp_type != IB_QPT_GSI)
2177 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2178 qp->qed_qp, &qp_params);
2179
2180 if (attr_mask & IB_QP_STATE) {
2181 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
Amrani, Ramd6ebbf22016-12-22 14:40:35 +02002182 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002183 qp->state = qp_params.new_state;
2184 }
2185
2186err:
2187 return rc;
2188}
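
/*
 * Editor's sketch, not driver code: the IB_QP_TIMEOUT handling above turns
 * the IB encoding (4.096 usec * 2^timeout) into the milliseconds the FW
 * expects.  A hypothetical helper mirroring that computation:
 */
static inline u32 example_ib_timeout_to_msec(u8 timeout)
{
	if (!timeout)
		return 0;	/* 0 means infinite */

	/* e.g. timeout = 14: 4096 * 2^14 / 10^6 ~= 67 msec */
	return 4096 * (1UL << timeout) / 1000 / 1000;
}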
2189
2190static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2191{
2192 int ib_qp_acc_flags = 0;
2193
2194 if (params->incoming_rdma_write_en)
2195 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2196 if (params->incoming_rdma_read_en)
2197 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2198 if (params->incoming_atomic_en)
2199 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2200 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2201 return ib_qp_acc_flags;
2202}
2203
2204int qedr_query_qp(struct ib_qp *ibqp,
2205 struct ib_qp_attr *qp_attr,
2206 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2207{
2208 struct qed_rdma_query_qp_out_params params;
2209 struct qedr_qp *qp = get_qedr_qp(ibqp);
2210 struct qedr_dev *dev = qp->dev;
2211 int rc = 0;
2212
2213 memset(&params, 0, sizeof(params));
2214
2215 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2216 if (rc)
2217 goto err;
2218
2219 memset(qp_attr, 0, sizeof(*qp_attr));
2220 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2221
2222 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2223 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
Amrani, Ram097b6152016-12-26 08:40:58 +02002224 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002225 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2226 qp_attr->rq_psn = params.rq_psn;
2227 qp_attr->sq_psn = params.sq_psn;
2228 qp_attr->dest_qp_num = params.dest_qp;
2229
2230 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2231
2232 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2233 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2234 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2235 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Ram Amrani59e89702017-01-24 13:50:35 +02002236 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002237 qp_init_attr->cap = qp_attr->cap;
2238
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04002239 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002240 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2241 params.flow_label, qp->sgid_idx,
2242 params.hop_limit_ttl, params.traffic_class_tos);
2243 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2244 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2245 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002246 qp_attr->timeout = params.timeout;
2247 qp_attr->rnr_retry = params.rnr_retry;
2248 qp_attr->retry_cnt = params.retry_cnt;
2249 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2250 qp_attr->pkey_index = params.pkey_index;
2251 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002252 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2253 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002254 qp_attr->alt_pkey_index = 0;
2255 qp_attr->alt_port_num = 0;
2256 qp_attr->alt_timeout = 0;
2257 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2258
2259 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2260 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2261 qp_attr->max_rd_atomic = params.max_rd_atomic;
2262 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2263
2264 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2265 qp_attr->cap.max_inline_data);
2266
2267err:
2268 return rc;
2269}
2270
Bart Van Assche00899852017-10-11 10:49:17 -07002271static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
Amrani, Ramdf158562016-12-22 14:52:24 +02002272{
2273 int rc = 0;
2274
2275 if (qp->qp_type != IB_QPT_GSI) {
2276 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2277 if (rc)
2278 return rc;
2279 }
2280
2281 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2282 qedr_cleanup_user(dev, qp);
2283 else
2284 qedr_cleanup_kernel(dev, qp);
2285
2286 return 0;
2287}
2288
Ram Amranicecbcdd2016-10-10 13:15:34 +03002289int qedr_destroy_qp(struct ib_qp *ibqp)
2290{
2291 struct qedr_qp *qp = get_qedr_qp(ibqp);
2292 struct qedr_dev *dev = qp->dev;
2293 struct ib_qp_attr attr;
2294 int attr_mask = 0;
2295 int rc = 0;
2296
2297 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2298 qp, qp->qp_type);
2299
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002300 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2301 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2302 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2303 (qp->state != QED_ROCE_QP_STATE_INIT)) {
Amrani, Ramb4c2cc42016-12-22 14:40:36 +02002304
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002305 attr.qp_state = IB_QPS_ERR;
2306 attr_mask |= IB_QP_STATE;
Ram Amranicecbcdd2016-10-10 13:15:34 +03002307
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03002308 /* Change the QP state to ERROR */
2309 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2310 }
Kalderon, Michale411e052017-07-26 14:41:56 +03002311 } else {
2312 /* Wait for the connect/accept to complete */
2313 if (qp->ep) {
2314 int wait_count = 1;
2315
2316 while (qp->ep->during_connect) {
2317 DP_DEBUG(dev, QEDR_MSG_QP,
2318 "Still in during connect/accept\n");
2319
2320 msleep(100);
2321 if (wait_count++ > 200) {
2322 DP_NOTICE(dev,
2323 "during connect timeout\n");
2324 break;
2325 }
2326 }
2327 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002328 }
2329
Amrani, Ramdf158562016-12-22 14:52:24 +02002330 if (qp->qp_type == IB_QPT_GSI)
Ram Amrani04886772016-10-10 13:15:38 +03002331 qedr_destroy_gsi_qp(dev);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002332
Amrani, Ramdf158562016-12-22 14:52:24 +02002333 qedr_free_qp_resources(dev, qp);
Ram Amranicecbcdd2016-10-10 13:15:34 +03002334
Kalderon, Michalde0089e2017-07-26 14:41:55 +03002335 if (atomic_dec_and_test(&qp->refcnt)) {
2336 qedr_idr_remove(dev, qp->qp_id);
2337 kfree(qp);
2338 }
Ram Amranicecbcdd2016-10-10 13:15:34 +03002339 return rc;
2340}
Ram Amranie0290cc2016-10-10 13:15:35 +03002341
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04002342struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
Moni Shoua477864c2016-11-23 08:23:24 +02002343 struct ib_udata *udata)
Ram Amrani04886772016-10-10 13:15:38 +03002344{
2345 struct qedr_ah *ah;
2346
2347 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2348 if (!ah)
2349 return ERR_PTR(-ENOMEM);
2350
2351 ah->attr = *attr;
2352
2353 return &ah->ibah;
2354}
2355
2356int qedr_destroy_ah(struct ib_ah *ibah)
2357{
2358 struct qedr_ah *ah = get_qedr_ah(ibah);
2359
2360 kfree(ah);
2361 return 0;
2362}
2363
Ram Amranie0290cc2016-10-10 13:15:35 +03002364static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2365{
2366 struct qedr_pbl *pbl, *tmp;
2367
2368 if (info->pbl_table)
2369 list_add_tail(&info->pbl_table->list_entry,
2370 &info->free_pbl_list);
2371
2372 if (!list_empty(&info->inuse_pbl_list))
2373 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2374
2375 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2376 list_del(&pbl->list_entry);
2377 qedr_free_pbl(dev, &info->pbl_info, pbl);
2378 }
2379}
2380
2381static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2382 size_t page_list_len, bool two_layered)
2383{
2384 struct qedr_pbl *tmp;
2385 int rc;
2386
2387 INIT_LIST_HEAD(&info->free_pbl_list);
2388 INIT_LIST_HEAD(&info->inuse_pbl_list);
2389
2390 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2391 page_list_len, two_layered);
2392 if (rc)
2393 goto done;
2394
2395 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002396 if (IS_ERR(info->pbl_table)) {
2397 rc = PTR_ERR(info->pbl_table);
Ram Amranie0290cc2016-10-10 13:15:35 +03002398 goto done;
2399 }
2400
2401 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2402 &info->pbl_table->pa);
2403
2404	/* In the usual case we use 2 PBLs, so we add one to the free
2405	 * list and allocate another one
2406 */
2407 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
Christophe Jaillet4cd33aa2017-02-18 12:28:15 +01002408 if (IS_ERR(tmp)) {
Ram Amranie0290cc2016-10-10 13:15:35 +03002409 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2410 goto done;
2411 }
2412
2413 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2414
2415 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2416
2417done:
2418 if (rc)
2419 free_mr_info(dev, info);
2420
2421 return rc;
2422}
2423
2424struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2425 u64 usr_addr, int acc, struct ib_udata *udata)
2426{
2427 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2428 struct qedr_mr *mr;
2429 struct qedr_pd *pd;
2430 int rc = -ENOMEM;
2431
2432 pd = get_qedr_pd(ibpd);
2433 DP_DEBUG(dev, QEDR_MSG_MR,
2434 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2435 pd->pd_id, start, len, usr_addr, acc);
2436
2437 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2438 return ERR_PTR(-EINVAL);
2439
2440 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2441 if (!mr)
2442 return ERR_PTR(rc);
2443
2444 mr->type = QEDR_MR_USER;
2445
2446 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2447 if (IS_ERR(mr->umem)) {
2448 rc = -EFAULT;
2449 goto err0;
2450 }
2451
2452 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2453 if (rc)
2454 goto err1;
2455
2456 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
Ram Amranie57bb6b2017-06-05 16:32:27 +03002457 &mr->info.pbl_info, mr->umem->page_shift);
Ram Amranie0290cc2016-10-10 13:15:35 +03002458
2459 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2460 if (rc) {
2461 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2462 goto err1;
2463 }
2464
2465 /* Index only, 18 bit long, lkey = itid << 8 | key */
2466 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2467 mr->hw_mr.key = 0;
2468 mr->hw_mr.pd = pd->pd_id;
2469 mr->hw_mr.local_read = 1;
2470 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2471 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2472 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2473 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2474 mr->hw_mr.mw_bind = false;
2475 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2476 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2477 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03002478 mr->hw_mr.page_size_log = mr->umem->page_shift;
Ram Amranie0290cc2016-10-10 13:15:35 +03002479 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2480 mr->hw_mr.length = len;
2481 mr->hw_mr.vaddr = usr_addr;
2482 mr->hw_mr.zbva = false;
2483 mr->hw_mr.phy_mr = false;
2484 mr->hw_mr.dma_mr = false;
2485
2486 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2487 if (rc) {
2488 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2489 goto err2;
2490 }
2491
2492 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2493 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2494 mr->hw_mr.remote_atomic)
2495 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2496
2497 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2498 mr->ibmr.lkey);
2499 return &mr->ibmr;
2500
2501err2:
2502 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2503err1:
2504 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2505err0:
2506 kfree(mr);
2507 return ERR_PTR(rc);
2508}
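
/*
 * Editor's sketch, not driver code: the lkey/rkey produced above pack the
 * 18-bit TID and an 8-bit key as (itid << 8 | key).  Hypothetical helpers
 * showing the round trip:
 */
static inline u32 example_mkey_pack(u32 itid, u8 key)
{
	return (itid << 8) | key;
}

static inline void example_mkey_unpack(u32 mkey, u32 *itid, u8 *key)
{
	*itid = mkey >> 8;
	*key = mkey & 0xff;
}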
2509
2510int qedr_dereg_mr(struct ib_mr *ib_mr)
2511{
2512 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2513 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2514 int rc = 0;
2515
2516 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2517 if (rc)
2518 return rc;
2519
2520 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2521
2522 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2523 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2524
2525 /* it could be user registered memory. */
2526 if (mr->umem)
2527 ib_umem_release(mr->umem);
2528
2529 kfree(mr);
2530
2531 return rc;
2532}
2533
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002534static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2535 int max_page_list_len)
Ram Amranie0290cc2016-10-10 13:15:35 +03002536{
2537 struct qedr_pd *pd = get_qedr_pd(ibpd);
2538 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2539 struct qedr_mr *mr;
2540 int rc = -ENOMEM;
2541
2542 DP_DEBUG(dev, QEDR_MSG_MR,
2543 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2544 max_page_list_len);
2545
2546 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2547 if (!mr)
2548 return ERR_PTR(rc);
2549
2550 mr->dev = dev;
2551 mr->type = QEDR_MR_FRMR;
2552
2553 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2554 if (rc)
2555 goto err0;
2556
2557 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2558 if (rc) {
2559 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2560 goto err0;
2561 }
2562
2563 /* Index only, 18 bit long, lkey = itid << 8 | key */
2564 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2565 mr->hw_mr.key = 0;
2566 mr->hw_mr.pd = pd->pd_id;
2567 mr->hw_mr.local_read = 1;
2568 mr->hw_mr.local_write = 0;
2569 mr->hw_mr.remote_read = 0;
2570 mr->hw_mr.remote_write = 0;
2571 mr->hw_mr.remote_atomic = 0;
2572 mr->hw_mr.mw_bind = false;
2573 mr->hw_mr.pbl_ptr = 0;
2574 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2575 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2576 mr->hw_mr.fbo = 0;
2577 mr->hw_mr.length = 0;
2578 mr->hw_mr.vaddr = 0;
2579 mr->hw_mr.zbva = false;
2580 mr->hw_mr.phy_mr = true;
2581 mr->hw_mr.dma_mr = false;
2582
2583 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2584 if (rc) {
2585 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2586 goto err1;
2587 }
2588
2589 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2590 mr->ibmr.rkey = mr->ibmr.lkey;
2591
2592 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2593 return mr;
2594
2595err1:
2596 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2597err0:
2598 kfree(mr);
2599 return ERR_PTR(rc);
2600}
2601
2602struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2603 enum ib_mr_type mr_type, u32 max_num_sg)
2604{
2605 struct qedr_dev *dev;
2606 struct qedr_mr *mr;
2607
2608 if (mr_type != IB_MR_TYPE_MEM_REG)
2609 return ERR_PTR(-EINVAL);
2610
2611 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2612
2613 if (IS_ERR(mr))
2614 return ERR_PTR(-EINVAL);
2615
2616 dev = mr->dev;
2617
2618 return &mr->ibmr;
2619}
2620
2621static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2622{
2623 struct qedr_mr *mr = get_qedr_mr(ibmr);
2624 struct qedr_pbl *pbl_table;
2625 struct regpair *pbe;
2626 u32 pbes_in_page;
2627
2628 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2629		DP_ERR(mr->dev, "qedr_set_page fails when npages=%d\n", mr->npages);
2630 return -ENOMEM;
2631 }
2632
2633 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2634 mr->npages, addr);
2635
2636 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2637 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2638 pbe = (struct regpair *)pbl_table->va;
2639 pbe += mr->npages % pbes_in_page;
2640 pbe->lo = cpu_to_le32((u32)addr);
2641 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2642
2643 mr->npages++;
2644
2645 return 0;
2646}
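
/*
 * Editor's note, illustrative arithmetic only: assuming a 4 KiB PBL page,
 * pbes_in_page = 4096 / sizeof(u64) = 512, so page number mr->npages = 1000
 * lands in pbl_table[1000 / 512] = pbl_table[1] at PBE offset
 * 1000 % 512 = 488.
 */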
2647
2648static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2649{
2650 int work = info->completed - info->completed_handled - 1;
2651
2652 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2653 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2654 struct qedr_pbl *pbl;
2655
2656		/* Free all the page lists that can be freed (all the ones that
2657		 * were invalidated), under the assumption that if an FMR completed
2658		 * successfully then any invalidate operation posted before it has
2659		 * also completed
2660 */
2661 pbl = list_first_entry(&info->inuse_pbl_list,
2662 struct qedr_pbl, list_entry);
Wei Yongjunaafec382016-10-29 16:19:53 +00002663 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
Ram Amranie0290cc2016-10-10 13:15:35 +03002664 info->completed_handled++;
2665 }
2666}
2667
2668int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2669 int sg_nents, unsigned int *sg_offset)
2670{
2671 struct qedr_mr *mr = get_qedr_mr(ibmr);
2672
2673 mr->npages = 0;
2674
2675 handle_completed_mrs(mr->dev, &mr->info);
2676 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2677}
2678
2679struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2680{
2681 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2682 struct qedr_pd *pd = get_qedr_pd(ibpd);
2683 struct qedr_mr *mr;
2684 int rc;
2685
2686 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2687 if (!mr)
2688 return ERR_PTR(-ENOMEM);
2689
2690 mr->type = QEDR_MR_DMA;
2691
2692 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2693 if (rc) {
2694 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2695 goto err1;
2696 }
2697
2698 /* index only, 18 bit long, lkey = itid << 8 | key */
2699 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2700 mr->hw_mr.pd = pd->pd_id;
2701 mr->hw_mr.local_read = 1;
2702 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2703 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2704 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2705 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2706 mr->hw_mr.dma_mr = true;
2707
2708 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2709 if (rc) {
2710 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2711 goto err2;
2712 }
2713
2714 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2715 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2716 mr->hw_mr.remote_atomic)
2717 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2718
2719 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2720 return &mr->ibmr;
2721
2722err2:
2723 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2724err1:
2725 kfree(mr);
2726 return ERR_PTR(rc);
2727}
Ram Amraniafa0e132016-10-10 13:15:36 +03002728
2729static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2730{
2731 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2732}
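
/*
 * Editor's sketch, not driver code: with the (prod + 1) % max_wr == cons
 * convention above, one slot is always left unused so that "full" and
 * "empty" (prod == cons) remain distinguishable.  Hypothetical helpers:
 */
static inline bool example_ring_empty(u16 prod, u16 cons)
{
	return prod == cons;
}

static inline bool example_ring_full(u16 prod, u16 cons, u16 max_wr)
{
	return ((prod + 1) % max_wr) == cons;
}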
2733
2734static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2735{
2736 int i, len = 0;
2737
2738 for (i = 0; i < num_sge; i++)
2739 len += sg_list[i].length;
2740
2741 return len;
2742}
2743
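/* Editor's note: cpu_to_be64(cpu_to_le64(*p)) below reduces to an
 * unconditional 64-bit byte swap of each word regardless of host
 * endianness, apparently so the inlined payload ends up in the byte
 * order the HW expects.
 */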
2744static void swap_wqe_data64(u64 *p)
2745{
2746 int i;
2747
2748 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2749 *p = cpu_to_be64(cpu_to_le64(*p));
2750}
2751
2752static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2753 struct qedr_qp *qp, u8 *wqe_size,
2754 struct ib_send_wr *wr,
2755 struct ib_send_wr **bad_wr, u8 *bits,
2756 u8 bit)
2757{
2758 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2759 char *seg_prt, *wqe;
2760 int i, seg_siz;
2761
2762 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2763 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2764 *bad_wr = wr;
2765 return 0;
2766 }
2767
2768 if (!data_size)
2769 return data_size;
2770
2771 *bits |= bit;
2772
2773 seg_prt = NULL;
2774 wqe = NULL;
2775 seg_siz = 0;
2776
2777 /* Copy data inline */
2778 for (i = 0; i < wr->num_sge; i++) {
2779 u32 len = wr->sg_list[i].length;
2780 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2781
2782 while (len > 0) {
2783 u32 cur;
2784
2785 /* New segment required */
2786 if (!seg_siz) {
2787 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2788 seg_prt = wqe;
2789 seg_siz = sizeof(struct rdma_sq_common_wqe);
2790 (*wqe_size)++;
2791 }
2792
2793 /* Calculate currently allowed length */
2794 cur = min_t(u32, len, seg_siz);
2795 memcpy(seg_prt, src, cur);
2796
2797 /* Update segment variables */
2798 seg_prt += cur;
2799 seg_siz -= cur;
2800
2801 /* Update sge variables */
2802 src += cur;
2803 len -= cur;
2804
2805 /* Swap fully-completed segments */
2806 if (!seg_siz)
2807 swap_wqe_data64((u64 *)wqe);
2808 }
2809 }
2810
2811 /* swap last not completed segment */
2812 if (seg_siz)
2813 swap_wqe_data64((u64 *)wqe);
2814
2815 return data_size;
2816}
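
/*
 * Editor's sketch, not driver code: the copy loop above packs an SGE list
 * into consecutive fixed-size WQE segments.  A simplified, self-contained
 * version of the same packing idea (hypothetical names, flat memory instead
 * of qed_chain_produce()):
 */
static void example_pack_inline(u8 *dst_segs, u32 seg_size,
				const u8 *src, u32 len)
{
	u32 off = 0;

	while (off < len) {
		u32 cur = min_t(u32, len - off, seg_size);

		memcpy(dst_segs, src + off, cur);
		dst_segs += seg_size;	/* advance to the next segment */
		off += cur;
	}
}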
2817
2818#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2819 do { \
2820 DMA_REGPAIR_LE(sge->addr, vaddr); \
2821 (sge)->length = cpu_to_le32(vlength); \
2822 (sge)->flags = cpu_to_le32(vflags); \
2823 } while (0)
2824
2825#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2826 do { \
2827 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2828 (hdr)->num_sges = num_sge; \
2829 } while (0)
2830
2831#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2832 do { \
2833 DMA_REGPAIR_LE(sge->addr, vaddr); \
2834 (sge)->length = cpu_to_le32(vlength); \
2835 (sge)->l_key = cpu_to_le32(vlkey); \
2836 } while (0)
2837
2838static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2839 struct ib_send_wr *wr)
2840{
2841 u32 data_size = 0;
2842 int i;
2843
2844 for (i = 0; i < wr->num_sge; i++) {
2845 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2846
2847 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2848 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2849 sge->length = cpu_to_le32(wr->sg_list[i].length);
2850 data_size += wr->sg_list[i].length;
2851 }
2852
2853 if (wqe_size)
2854 *wqe_size += wr->num_sge;
2855
2856 return data_size;
2857}
2858
2859static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2860 struct qedr_qp *qp,
2861 struct rdma_sq_rdma_wqe_1st *rwqe,
2862 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2863 struct ib_send_wr *wr,
2864 struct ib_send_wr **bad_wr)
2865{
2866 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2867 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2868
Amrani, Ram8b0cabc2016-12-22 14:40:37 +02002869 if (wr->send_flags & IB_SEND_INLINE &&
2870 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2871 wr->opcode == IB_WR_RDMA_WRITE)) {
Ram Amraniafa0e132016-10-10 13:15:36 +03002872 u8 flags = 0;
2873
2874 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2875 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2876 bad_wr, &rwqe->flags, flags);
2877 }
2878
2879 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2880}
2881
2882static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2883 struct qedr_qp *qp,
2884 struct rdma_sq_send_wqe_1st *swqe,
2885 struct rdma_sq_send_wqe_2st *swqe2,
2886 struct ib_send_wr *wr,
2887 struct ib_send_wr **bad_wr)
2888{
2889 memset(swqe2, 0, sizeof(*swqe2));
2890 if (wr->send_flags & IB_SEND_INLINE) {
2891 u8 flags = 0;
2892
2893 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2894 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2895 bad_wr, &swqe->flags, flags);
2896 }
2897
2898 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2899}
2900
2901static int qedr_prepare_reg(struct qedr_qp *qp,
2902 struct rdma_sq_fmr_wqe_1st *fwqe1,
2903 struct ib_reg_wr *wr)
2904{
2905 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2906 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2907
2908 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2909 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2910 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2911 fwqe1->l_key = wr->key;
2912
Amrani, Ram08c4cf512017-04-27 13:35:31 +03002913 fwqe2->access_ctrl = 0;
2914
Ram Amraniafa0e132016-10-10 13:15:36 +03002915 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2916 !!(wr->access & IB_ACCESS_REMOTE_READ));
2917 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2918 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2919 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2920 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2921 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2922 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2923 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2924 fwqe2->fmr_ctrl = 0;
2925
2926 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2927 ilog2(mr->ibmr.page_size) - 12);
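	/* e.g. 4 KiB pages: ilog2(4096) - 12 = 0;
	 * 2 MiB pages: ilog2(0x200000) - 12 = 9.
	 */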
2928
2929 fwqe2->length_hi = 0;
2930 fwqe2->length_lo = mr->ibmr.length;
2931 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2932 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2933
2934 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2935
2936 return 0;
2937}
2938
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002939static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
Ram Amraniafa0e132016-10-10 13:15:36 +03002940{
2941 switch (opcode) {
2942 case IB_WR_RDMA_WRITE:
2943 case IB_WR_RDMA_WRITE_WITH_IMM:
2944 return IB_WC_RDMA_WRITE;
2945 case IB_WR_SEND_WITH_IMM:
2946 case IB_WR_SEND:
2947 case IB_WR_SEND_WITH_INV:
2948 return IB_WC_SEND;
2949 case IB_WR_RDMA_READ:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03002950 case IB_WR_RDMA_READ_WITH_INV:
Ram Amraniafa0e132016-10-10 13:15:36 +03002951 return IB_WC_RDMA_READ;
2952 case IB_WR_ATOMIC_CMP_AND_SWP:
2953 return IB_WC_COMP_SWAP;
2954 case IB_WR_ATOMIC_FETCH_AND_ADD:
2955 return IB_WC_FETCH_ADD;
2956 case IB_WR_REG_MR:
2957 return IB_WC_REG_MR;
2958 case IB_WR_LOCAL_INV:
2959 return IB_WC_LOCAL_INV;
2960 default:
2961 return IB_WC_SEND;
2962 }
2963}
2964
Ram Amrani27a4b1a2017-01-24 13:51:39 +02002965static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
Ram Amraniafa0e132016-10-10 13:15:36 +03002966{
2967 int wq_is_full, err_wr, pbl_is_full;
2968 struct qedr_dev *dev = qp->dev;
2969
2970 /* prevent SQ overflow and/or processing of a bad WR */
2971 err_wr = wr->num_sge > qp->sq.max_sges;
2972 wq_is_full = qedr_wq_is_full(&qp->sq);
2973 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2974 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2975 if (wq_is_full || err_wr || pbl_is_full) {
2976 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2977 DP_ERR(dev,
2978 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2979 qp);
2980 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2981 }
2982
2983 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2984 DP_ERR(dev,
2985 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2986 qp);
2987 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2988 }
2989
2990 if (pbl_is_full &&
2991 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2992 DP_ERR(dev,
2993 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2994 qp);
2995 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2996 }
2997 return false;
2998 }
2999 return true;
3000}
3001
Ram Amrani27a4b1a2017-01-24 13:51:39 +02003002static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Ram Amraniafa0e132016-10-10 13:15:36 +03003003 struct ib_send_wr **bad_wr)
3004{
3005 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3006 struct qedr_qp *qp = get_qedr_qp(ibqp);
3007 struct rdma_sq_atomic_wqe_1st *awqe1;
3008 struct rdma_sq_atomic_wqe_2nd *awqe2;
3009 struct rdma_sq_atomic_wqe_3rd *awqe3;
3010 struct rdma_sq_send_wqe_2st *swqe2;
3011 struct rdma_sq_local_inv_wqe *iwqe;
3012 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3013 struct rdma_sq_send_wqe_1st *swqe;
3014 struct rdma_sq_rdma_wqe_1st *rwqe;
3015 struct rdma_sq_fmr_wqe_1st *fwqe1;
3016 struct rdma_sq_common_wqe *wqe;
3017 u32 length;
3018 int rc = 0;
3019 bool comp;
3020
3021 if (!qedr_can_post_send(qp, wr)) {
3022 *bad_wr = wr;
3023 return -ENOMEM;
3024 }
3025
3026 wqe = qed_chain_produce(&qp->sq.pbl);
3027 qp->wqe_wr_id[qp->sq.prod].signaled =
3028 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3029
3030 wqe->flags = 0;
3031 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3032 !!(wr->send_flags & IB_SEND_SOLICITED));
3033 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3034 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3035 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3036 !!(wr->send_flags & IB_SEND_FENCE));
3037 wqe->prev_wqe_size = qp->prev_wqe_size;
3038
3039 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3040
3041 switch (wr->opcode) {
3042 case IB_WR_SEND_WITH_IMM:
3043 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3044 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3045 swqe->wqe_size = 2;
3046 swqe2 = qed_chain_produce(&qp->sq.pbl);
3047
3048 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
3049 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3050 wr, bad_wr);
3051 swqe->length = cpu_to_le32(length);
3052 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3053 qp->prev_wqe_size = swqe->wqe_size;
3054 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3055 break;
3056 case IB_WR_SEND:
3057 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3058 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3059
3060 swqe->wqe_size = 2;
3061 swqe2 = qed_chain_produce(&qp->sq.pbl);
3062 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3063 wr, bad_wr);
3064 swqe->length = cpu_to_le32(length);
3065 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3066 qp->prev_wqe_size = swqe->wqe_size;
3067 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3068 break;
3069 case IB_WR_SEND_WITH_INV:
3070 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3071 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3072 swqe2 = qed_chain_produce(&qp->sq.pbl);
3073 swqe->wqe_size = 2;
3074 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3075 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3076 wr, bad_wr);
3077 swqe->length = cpu_to_le32(length);
3078 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3079 qp->prev_wqe_size = swqe->wqe_size;
3080 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3081 break;
3082
3083 case IB_WR_RDMA_WRITE_WITH_IMM:
3084 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3085 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3086
3087 rwqe->wqe_size = 2;
3088 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3089 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3090 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3091 wr, bad_wr);
3092 rwqe->length = cpu_to_le32(length);
3093 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3094 qp->prev_wqe_size = rwqe->wqe_size;
3095 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3096 break;
3097 case IB_WR_RDMA_WRITE:
3098 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3099 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3100
3101 rwqe->wqe_size = 2;
3102 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3103 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3104 wr, bad_wr);
3105 rwqe->length = cpu_to_le32(length);
3106 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3107 qp->prev_wqe_size = rwqe->wqe_size;
3108 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3109 break;
3110 case IB_WR_RDMA_READ_WITH_INV:
Kalderon, Michalfb1a22b2017-07-26 14:41:54 +03003111 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3112		/* fallthrough: the rest is identical to RDMA READ */
Ram Amraniafa0e132016-10-10 13:15:36 +03003113
3114 case IB_WR_RDMA_READ:
3115 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3116 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3117
3118 rwqe->wqe_size = 2;
3119 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3120 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3121 wr, bad_wr);
3122 rwqe->length = cpu_to_le32(length);
3123 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3124 qp->prev_wqe_size = rwqe->wqe_size;
3125 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3126 break;
3127
3128 case IB_WR_ATOMIC_CMP_AND_SWP:
3129 case IB_WR_ATOMIC_FETCH_AND_ADD:
3130 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3131 awqe1->wqe_size = 4;
3132
3133 awqe2 = qed_chain_produce(&qp->sq.pbl);
3134 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3135 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3136
3137 awqe3 = qed_chain_produce(&qp->sq.pbl);
3138
3139 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3140 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3141 DMA_REGPAIR_LE(awqe3->swap_data,
3142 atomic_wr(wr)->compare_add);
3143 } else {
3144 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3145 DMA_REGPAIR_LE(awqe3->swap_data,
3146 atomic_wr(wr)->swap);
3147 DMA_REGPAIR_LE(awqe3->cmp_data,
3148 atomic_wr(wr)->compare_add);
3149 }
3150
3151 qedr_prepare_sq_sges(qp, NULL, wr);
3152
3153 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3154 qp->prev_wqe_size = awqe1->wqe_size;
3155 break;
3156
3157 case IB_WR_LOCAL_INV:
3158 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3159 iwqe->wqe_size = 1;
3160
3161 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3162 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3163 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3164 qp->prev_wqe_size = iwqe->wqe_size;
3165 break;
3166 case IB_WR_REG_MR:
3167 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3168 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3169 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3170 fwqe1->wqe_size = 2;
3171
3172 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3173 if (rc) {
3174 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3175 *bad_wr = wr;
3176 break;
3177 }
3178
3179 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3180 qp->prev_wqe_size = fwqe1->wqe_size;
3181 break;
3182 default:
3183 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3184 rc = -EINVAL;
3185 *bad_wr = wr;
3186 break;
3187 }
3188
3189 if (*bad_wr) {
3190 u16 value;
3191
3192 /* Restore prod to its position before
3193 * this WR was processed
3194 */
3195 value = le16_to_cpu(qp->sq.db_data.data.value);
3196 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3197
3198 /* Restore prev_wqe_size */
3199 qp->prev_wqe_size = wqe->prev_wqe_size;
3200 rc = -EINVAL;
3201 DP_ERR(dev, "POST SEND FAILED\n");
3202 }
3203
3204 return rc;
3205}
3206
3207int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3208 struct ib_send_wr **bad_wr)
3209{
3210 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3211 struct qedr_qp *qp = get_qedr_qp(ibqp);
3212 unsigned long flags;
3213 int rc = 0;
3214
3215 *bad_wr = NULL;
3216
Ram Amrani04886772016-10-10 13:15:38 +03003217 if (qp->qp_type == IB_QPT_GSI)
3218 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3219
Ram Amraniafa0e132016-10-10 13:15:36 +03003220 spin_lock_irqsave(&qp->q_lock, flags);
3221
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003222 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3223 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3224 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3225 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3226 spin_unlock_irqrestore(&qp->q_lock, flags);
3227 *bad_wr = wr;
3228 DP_DEBUG(dev, QEDR_MSG_CQ,
3229 "QP in wrong state! QP icid=0x%x state %d\n",
3230 qp->icid, qp->state);
3231 return -EINVAL;
3232 }
Ram Amraniafa0e132016-10-10 13:15:36 +03003233 }
3234
Ram Amraniafa0e132016-10-10 13:15:36 +03003235 while (wr) {
3236 rc = __qedr_post_send(ibqp, wr, bad_wr);
3237 if (rc)
3238 break;
3239
3240 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3241
3242 qedr_inc_sw_prod(&qp->sq);
3243
3244 qp->sq.db_data.data.value++;
3245
3246 wr = wr->next;
3247 }
3248
3249 /* Trigger doorbell
3250 * If there was a failure in the first WR then it will be triggered in
3251	 * vain. However, this is not harmful (as long as the producer value is
3252 * unchanged). For performance reasons we avoid checking for this
3253 * redundant doorbell.
3254 */
3255 wmb();
3256 writel(qp->sq.db_data.raw, qp->sq.db);
3257
3258 /* Make sure write sticks */
3259 mmiowb();
3260
3261 spin_unlock_irqrestore(&qp->q_lock, flags);
3262
3263 return rc;
3264}
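
/*
 * Editor's sketch (hypothetical caller, not part of the driver): how a
 * kernel ULP would feed a single signaled SEND into the post path above.
 * The qp, DMA address and lkey are assumed to come from the caller's own
 * setup (ib_create_qp(), DMA mapping, a registered or DMA MR).
 */
static int example_post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len,
				 u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= 1,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}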
3265
3266int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3267 struct ib_recv_wr **bad_wr)
3268{
3269 struct qedr_qp *qp = get_qedr_qp(ibqp);
3270 struct qedr_dev *dev = qp->dev;
3271 unsigned long flags;
3272 int status = 0;
3273
Ram Amrani04886772016-10-10 13:15:38 +03003274 if (qp->qp_type == IB_QPT_GSI)
3275 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3276
Ram Amraniafa0e132016-10-10 13:15:36 +03003277 spin_lock_irqsave(&qp->q_lock, flags);
3278
Amrani, Ram922d9a42016-12-22 14:40:38 +02003279 if (qp->state == QED_ROCE_QP_STATE_RESET) {
Ram Amraniafa0e132016-10-10 13:15:36 +03003280 spin_unlock_irqrestore(&qp->q_lock, flags);
3281 *bad_wr = wr;
3282 return -EINVAL;
3283 }
3284
3285 while (wr) {
3286 int i;
3287
3288 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3289 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3290 wr->num_sge > qp->rq.max_sges) {
3291 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3292 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3293 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3294 qp->rq.max_sges);
3295 status = -ENOMEM;
3296 *bad_wr = wr;
3297 break;
3298 }
3299 for (i = 0; i < wr->num_sge; i++) {
3300 u32 flags = 0;
3301 struct rdma_rq_sge *rqe =
3302 qed_chain_produce(&qp->rq.pbl);
3303
3304 /* First one must include the number
3305 * of SGE in the list
3306 */
3307 if (!i)
3308 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3309 wr->num_sge);
3310
3311 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3312 wr->sg_list[i].lkey);
3313
3314 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3315 wr->sg_list[i].length, flags);
3316 }
3317
3318		/* Special case of no SGEs. FW requires between 1-4 SGEs, so in
3319		 * this case we need to post one SGE with length zero. This is
3320		 * because an RDMA write with immediate consumes an RQ entry.
3321 */
3322 if (!wr->num_sge) {
3323 u32 flags = 0;
3324 struct rdma_rq_sge *rqe =
3325 qed_chain_produce(&qp->rq.pbl);
3326
3327 /* First one must include the number
3328 * of SGE in the list
3329 */
3330 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3331 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3332
3333 RQ_SGE_SET(rqe, 0, 0, flags);
3334 i = 1;
3335 }
3336
3337 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3338 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3339
3340 qedr_inc_sw_prod(&qp->rq);
3341
3342 /* Flush all the writes before signalling doorbell */
3343 wmb();
3344
3345 qp->rq.db_data.data.value++;
3346
3347 writel(qp->rq.db_data.raw, qp->rq.db);
3348
3349 /* Make sure write sticks */
3350 mmiowb();
3351
Kalderon, Michalf5b1b172017-07-26 14:41:53 +03003352 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3353 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3354 mmiowb(); /* for second doorbell */
3355 }
3356
Ram Amraniafa0e132016-10-10 13:15:36 +03003357 wr = wr->next;
3358 }
3359
3360 spin_unlock_irqrestore(&qp->q_lock, flags);
3361
3362 return status;
3363}
3364
3365static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3366{
3367 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3368
3369 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3370 cq->pbl_toggle;
3371}
3372
3373static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3374{
3375 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3376 struct qedr_qp *qp;
3377
3378 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3379 resp_cqe->qp_handle.lo,
3380 u64);
3381 return qp;
3382}
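
/*
 * Editor's note, illustrative only: the hi/lo pair read back above is the
 * same split of the qp pointer that was written into qp_handle_lo/hi at QP
 * creation time, i.e. roughly:
 *
 *	hi = upper_32_bits((uintptr_t)qp);
 *	lo = lower_32_bits((uintptr_t)qp);
 *	qp = (struct qedr_qp *)(uintptr_t)(((u64)hi << 32) | lo);
 */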
3383
3384static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3385{
3386 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3387
3388 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3389}
3390
3391/* Return latest CQE (needs processing) */
3392static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3393{
3394 return cq->latest_cqe;
3395}
3396
3397/* For FMR we need to increase the 'completed' counter used by the FMR
3398 * algorithm that decides whether a PBL can be freed.
3399 * We need to do this whether or not the work request was signaled. For
3400 * this purpose we call this function from the condition that checks if a WR
3401 * should be skipped, to make sure we don't miss it (possibly this FMR
3402 * operation was not signaled)
3403 */
3404static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3405{
3406 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3407 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3408}
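/* Illustrative only: the reuse decision made elsewhere in the driver boils
 * down to comparing this counter with the number of FMR work requests whose
 * completion has already been handled for the MR, conceptually
 *
 *	if (mr->info.completed > fmr_completions_handled)
 *		recycle the PBLs referenced by the already-completed WRs;
 *
 * where 'fmr_completions_handled' is a stand-in name used for this sketch,
 * not a field of the real structure.
 */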
3409
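/* Note for process_req() below: even when a WC is skipped because the WQE was
 * unsignaled (and the poll is not forced), the SQ chain elements of that WQE
 * are still consumed and the software consumer is advanced, so ring slots are
 * never leaked.
 */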
3410static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3411 struct qedr_cq *cq, int num_entries,
3412 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3413 int force)
3414{
3415 u16 cnt = 0;
3416
3417 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3418 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3419 qedr_chk_if_fmr(qp);
3420 /* skip WC */
3421 goto next_cqe;
3422 }
3423
3424 /* fill WC */
3425 wc->status = status;
Amrani, Ram27035a12016-12-22 14:40:39 +02003426 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003427 wc->wc_flags = 0;
3428 wc->src_qp = qp->id;
3429 wc->qp = &qp->ibqp;
3430
3431 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3432 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3433
3434 switch (wc->opcode) {
3435 case IB_WC_RDMA_WRITE:
3436 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3437 break;
3438 case IB_WC_COMP_SWAP:
3439 case IB_WC_FETCH_ADD:
3440 wc->byte_len = 8;
3441 break;
3442 case IB_WC_REG_MR:
3443 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3444 break;
Michal Kalderondac27382017-06-05 16:32:26 +03003445 case IB_WC_RDMA_READ:
3446 case IB_WC_SEND:
3447 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3448 break;
Ram Amraniafa0e132016-10-10 13:15:36 +03003449 default:
3450 break;
3451 }
3452
3453 num_entries--;
3454 wc++;
3455 cnt++;
3456next_cqe:
3457 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3458 qed_chain_consume(&qp->sq.pbl);
3459 qedr_inc_sw_cons(&qp->sq);
3460 }
3461
3462 return cnt;
3463}
3464
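/* Note for qedr_poll_cq_req() below: on a fatal requester status the QP is
 * moved to the ERR state, every WQE the hardware completed before the failing
 * one is reported as IB_WC_SUCCESS, and the failing WQE itself is reported
 * with the status mapped from the CQE; anything still outstanding is expected
 * to be completed later with IB_WC_WR_FLUSH_ERR.
 */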
3465static int qedr_poll_cq_req(struct qedr_dev *dev,
3466 struct qedr_qp *qp, struct qedr_cq *cq,
3467 int num_entries, struct ib_wc *wc,
3468 struct rdma_cqe_requester *req)
3469{
3470 int cnt = 0;
3471
3472 switch (req->status) {
3473 case RDMA_CQE_REQ_STS_OK:
3474 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3475 IB_WC_SUCCESS, 0);
3476 break;
3477 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
Ram Amranic78c3142017-01-24 13:50:37 +02003478 if (qp->state != QED_ROCE_QP_STATE_ERR)
3479 DP_ERR(dev,
3480 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3481 cq->icid, qp->icid);
Ram Amraniafa0e132016-10-10 13:15:36 +03003482 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
Amrani, Ram74c38752016-12-22 14:40:40 +02003483 IB_WC_WR_FLUSH_ERR, 1);
Ram Amraniafa0e132016-10-10 13:15:36 +03003484 break;
3485 default:
3486 		/* process all WQEs before the consumer */
3487 qp->state = QED_ROCE_QP_STATE_ERR;
3488 cnt = process_req(dev, qp, cq, num_entries, wc,
3489 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3490 wc += cnt;
3491 		/* if we have an extra WC, fill it with the actual error info */
3492 if (cnt < num_entries) {
3493 enum ib_wc_status wc_status;
3494
3495 switch (req->status) {
3496 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3497 DP_ERR(dev,
3498 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3499 cq->icid, qp->icid);
3500 wc_status = IB_WC_BAD_RESP_ERR;
3501 break;
3502 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3503 DP_ERR(dev,
3504 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3505 cq->icid, qp->icid);
3506 wc_status = IB_WC_LOC_LEN_ERR;
3507 break;
3508 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3509 DP_ERR(dev,
3510 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3511 cq->icid, qp->icid);
3512 wc_status = IB_WC_LOC_QP_OP_ERR;
3513 break;
3514 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3515 DP_ERR(dev,
3516 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3517 cq->icid, qp->icid);
3518 wc_status = IB_WC_LOC_PROT_ERR;
3519 break;
3520 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3521 DP_ERR(dev,
3522 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3523 cq->icid, qp->icid);
3524 wc_status = IB_WC_MW_BIND_ERR;
3525 break;
3526 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3527 DP_ERR(dev,
3528 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3529 cq->icid, qp->icid);
3530 wc_status = IB_WC_REM_INV_REQ_ERR;
3531 break;
3532 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3533 DP_ERR(dev,
3534 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3535 cq->icid, qp->icid);
3536 wc_status = IB_WC_REM_ACCESS_ERR;
3537 break;
3538 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3539 DP_ERR(dev,
3540 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3541 cq->icid, qp->icid);
3542 wc_status = IB_WC_REM_OP_ERR;
3543 break;
3544 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3545 DP_ERR(dev,
3546 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3547 cq->icid, qp->icid);
3548 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3549 break;
3550 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3551 DP_ERR(dev,
3552 				    "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3553 cq->icid, qp->icid);
3554 wc_status = IB_WC_RETRY_EXC_ERR;
3555 break;
3556 default:
3557 DP_ERR(dev,
3558 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3559 cq->icid, qp->icid);
3560 wc_status = IB_WC_GENERAL_ERR;
3561 }
3562 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3563 wc_status, 1);
3564 }
3565 }
3566
3567 return cnt;
3568}
3569
Amrani, Ramb6acd712017-04-27 13:35:35 +03003570static inline int qedr_cqe_resp_status_to_ib(u8 status)
3571{
3572 switch (status) {
3573 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3574 return IB_WC_LOC_ACCESS_ERR;
3575 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3576 return IB_WC_LOC_LEN_ERR;
3577 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3578 return IB_WC_LOC_QP_OP_ERR;
3579 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3580 return IB_WC_LOC_PROT_ERR;
3581 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3582 return IB_WC_MW_BIND_ERR;
3583 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3584 return IB_WC_REM_INV_RD_REQ_ERR;
3585 case RDMA_CQE_RESP_STS_OK:
3586 return IB_WC_SUCCESS;
3587 default:
3588 return IB_WC_GENERAL_ERR;
3589 }
3590}
3591
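/* Valid responder CQE flag combinations handled below: QEDR_RESP_IMM may
 * appear alone or together with QEDR_RESP_RDMA (RDMA write with immediate),
 * while QEDR_RESP_INV must appear alone; QEDR_RESP_INV combined with
 * QEDR_RESP_RDMA, QEDR_RESP_IMM combined with QEDR_RESP_INV, and
 * QEDR_RESP_RDMA on its own are all rejected as invalid.
 */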
3592static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3593 struct ib_wc *wc)
3594{
3595 wc->status = IB_WC_SUCCESS;
3596 wc->byte_len = le32_to_cpu(resp->length);
3597
3598 if (resp->flags & QEDR_RESP_IMM) {
3599 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3600 wc->wc_flags |= IB_WC_WITH_IMM;
3601
3602 if (resp->flags & QEDR_RESP_RDMA)
3603 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3604
3605 if (resp->flags & QEDR_RESP_INV)
3606 return -EINVAL;
3607
3608 } else if (resp->flags & QEDR_RESP_INV) {
3609 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3610 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3611
3612 if (resp->flags & QEDR_RESP_RDMA)
3613 return -EINVAL;
3614
3615 } else if (resp->flags & QEDR_RESP_RDMA) {
3616 return -EINVAL;
3617 }
3618
3619 return 0;
3620}
3621
Ram Amraniafa0e132016-10-10 13:15:36 +03003622static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3623 struct qedr_cq *cq, struct ib_wc *wc,
3624 struct rdma_cqe_responder *resp, u64 wr_id)
3625{
Amrani, Ramb6acd712017-04-27 13:35:35 +03003626 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
Ram Amraniafa0e132016-10-10 13:15:36 +03003627 wc->opcode = IB_WC_RECV;
3628 wc->wc_flags = 0;
3629
Amrani, Ramb6acd712017-04-27 13:35:35 +03003630 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3631 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3632 DP_ERR(dev,
3633 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3634 cq, cq->icid, resp->flags);
Ram Amraniafa0e132016-10-10 13:15:36 +03003635
Amrani, Ramb6acd712017-04-27 13:35:35 +03003636 } else {
3637 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3638 if (wc->status == IB_WC_GENERAL_ERR)
3639 DP_ERR(dev,
3640 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3641 cq, cq->icid, resp->status);
Ram Amraniafa0e132016-10-10 13:15:36 +03003642 }
3643
Amrani, Ramb6acd712017-04-27 13:35:35 +03003644 /* Fill the rest of the WC */
Amrani, Ram27035a12016-12-22 14:40:39 +02003645 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003646 wc->src_qp = qp->id;
3647 wc->qp = &qp->ibqp;
3648 wc->wr_id = wr_id;
3649}
3650
3651static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3652 struct qedr_cq *cq, struct ib_wc *wc,
3653 struct rdma_cqe_responder *resp)
3654{
3655 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3656
3657 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3658
3659 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3660 qed_chain_consume(&qp->rq.pbl);
3661 qedr_inc_sw_cons(&qp->rq);
3662
3663 return 1;
3664}
3665
3666static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3667 int num_entries, struct ib_wc *wc, u16 hw_cons)
3668{
3669 u16 cnt = 0;
3670
3671 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3672 /* fill WC */
3673 wc->status = IB_WC_WR_FLUSH_ERR;
Amrani, Ram27035a12016-12-22 14:40:39 +02003674 wc->vendor_err = 0;
Ram Amraniafa0e132016-10-10 13:15:36 +03003675 wc->wc_flags = 0;
3676 wc->src_qp = qp->id;
3677 wc->byte_len = 0;
3678 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3679 wc->qp = &qp->ibqp;
3680 num_entries--;
3681 wc++;
3682 cnt++;
3683 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3684 qed_chain_consume(&qp->rq.pbl);
3685 qedr_inc_sw_cons(&qp->rq);
3686 }
3687
3688 return cnt;
3689}
3690
3691static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3692 struct rdma_cqe_responder *resp, int *update)
3693{
3694 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3695 consume_cqe(cq);
3696 *update |= 1;
3697 }
3698}
3699
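/* Note for qedr_poll_cq_resp() below: a single flushed responder CQE can
 * cover several outstanding RQ WQEs. The CQE itself is consumed only once the
 * software RQ consumer catches up with the rq_cons value reported by the
 * hardware; until then the same CQE is revisited on later polls so that every
 * flushed WQE eventually receives an IB_WC_WR_FLUSH_ERR completion.
 */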
3700static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3701 struct qedr_cq *cq, int num_entries,
3702 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3703 int *update)
3704{
3705 int cnt;
3706
3707 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3708 cnt = process_resp_flush(qp, cq, num_entries, wc,
3709 resp->rq_cons);
3710 try_consume_resp_cqe(cq, qp, resp, update);
3711 } else {
3712 cnt = process_resp_one(dev, qp, cq, wc, resp);
3713 consume_cqe(cq);
3714 *update |= 1;
3715 }
3716
3717 return cnt;
3718}
3719
3720static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3721 struct rdma_cqe_requester *req, int *update)
3722{
3723 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3724 consume_cqe(cq);
3725 *update |= 1;
3726 }
3727}
3728
3729int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3730{
3731 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3732 struct qedr_cq *cq = get_qedr_cq(ibcq);
3733 union rdma_cqe *cqe = cq->latest_cqe;
3734 u32 old_cons, new_cons;
3735 unsigned long flags;
3736 int update = 0;
3737 int done = 0;
3738
Amrani, Ram4dd72632017-04-27 13:35:34 +03003739 if (cq->destroyed) {
3740 DP_ERR(dev,
3741 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3742 cq, cq->icid);
3743 return 0;
3744 }
3745
Ram Amrani04886772016-10-10 13:15:38 +03003746 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3747 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3748
Ram Amraniafa0e132016-10-10 13:15:36 +03003749 spin_lock_irqsave(&cq->cq_lock, flags);
3750 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3751 while (num_entries && is_valid_cqe(cq, cqe)) {
3752 struct qedr_qp *qp;
3753 int cnt = 0;
3754
3755 /* prevent speculative reads of any field of CQE */
3756 rmb();
3757
3758 qp = cqe_get_qp(cqe);
3759 if (!qp) {
3760 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3761 break;
3762 }
3763
3764 wc->qp = &qp->ibqp;
3765
3766 switch (cqe_get_type(cqe)) {
3767 case RDMA_CQE_TYPE_REQUESTER:
3768 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3769 &cqe->req);
3770 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3771 break;
3772 case RDMA_CQE_TYPE_RESPONDER_RQ:
3773 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3774 &cqe->resp, &update);
3775 break;
3776 case RDMA_CQE_TYPE_INVALID:
3777 default:
3778 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3779 cqe_get_type(cqe));
3780 }
3781 num_entries -= cnt;
3782 wc += cnt;
3783 done += cnt;
3784
3785 cqe = get_cqe(cq);
3786 }
3787 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3788
3789 cq->cq_cons += new_cons - old_cons;
3790
3791 if (update)
3792 		/* doorbell notifies about the latest VALID entry,
3793 		 * but the chain already points to the next INVALID one
3794 		 */
3795 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3796
3797 spin_unlock_irqrestore(&cq->cq_lock, flags);
3798 return done;
3799}
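/* Illustrative only, not part of the driver: a kernel consumer drains this CQ
 * through the core ib_poll_cq() wrapper, typically from its completion
 * handler. A minimal sketch, assuming 'cq' exists and handle_one_wc() is a
 * hypothetical per-completion helper:
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++)
 *			handle_one_wc(&wc[i]);
 *	}
 */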
Ram Amrani993d1b52016-10-10 13:15:39 +03003800
3801int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3802 u8 port_num,
3803 const struct ib_wc *in_wc,
3804 const struct ib_grh *in_grh,
3805 const struct ib_mad_hdr *mad_hdr,
3806 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3807 size_t *out_mad_size, u16 *out_mad_pkey_index)
3808{
3809 struct qedr_dev *dev = get_qedr_dev(ibdev);
3810
3811 DP_DEBUG(dev, QEDR_MSG_GSI,
3812 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3813 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3814 mad_hdr->class_specific, mad_hdr->class_version,
3815 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3816 return IB_MAD_RESULT_SUCCESS;
3817}