/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

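/*
 * A CQE is owned by software when its ownership bit does not match
 * the parity of the index's current pass through the power-of-two
 * ring.  With 64-byte CQEs the hardware writes the valid data -- and
 * the ownership bit -- in the second half of the entry, so the check
 * is done there; callers still get a pointer to the start of the
 * entry.
 */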
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

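/*
 * Allocate a kernel-space CQE buffer and describe it to the device:
 * the buffer is sized by the device's CQE stride, and its pages are
 * registered in an MTT so the HCA can DMA completions into it.
 */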
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

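/*
 * Pin and map a user-space CQE buffer: ib_umem_get() takes a
 * reference on the user pages, and the resulting MTT lets the HCA
 * write completions directly into the user's buffer.
 */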
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    (*umem)->page_shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

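/*
 * Create a CQ.  The requested size is rounded up to a power of two
 * with one extra slot reserved by the driver, and ibcq.cqe reports
 * the usable capacity (entries - 1).  The CQE buffer and doorbell
 * come from user memory when a ucontext is supplied, otherwise from
 * kernel memory.  IB_CQ_FLAGS_TIMESTAMP_COMPLETION is the only
 * creation flag supported.
 */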
#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

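/*
 * Copy the CQEs still outstanding in the old buffer into the resize
 * buffer, stopping at the special RESIZE CQE that marks the point
 * where the hardware switched buffers.  The ownership bit is
 * recomputed for each entry's position in the new ring.
 */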
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

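/*
 * Resize a CQ.  A new buffer is allocated (user or kernel, to match
 * the CQ) and the firmware is asked to switch to it with
 * mlx4_cq_resize().  For user CQs the buffers are swapped right away;
 * for kernel CQs the outstanding CQEs are copied into the new buffer
 * under the CQ lock, unless poll already performed the switch when it
 * encountered the RESIZE CQE.
 */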
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

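/*
 * The hardware validated the checksum only if the CQE reports a
 * non-fragmented IPv4 packet without IP options, an OK IP checksum, a
 * TCP or UDP payload, and an end-to-end checksum of 0xffff.
 */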
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

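/*
 * For proxy special QPs (used in multi-function/SR-IOV mode to tunnel
 * MADs between functions), the real completion metadata travels in a
 * header placed in the receive buffer rather than in the CQE, so the
 * work completion fields are pulled from there.
 */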
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp     = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags  |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

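/*
 * When the device is in an internal error state the hardware can no
 * longer generate completions, so poll simulates IB_WC_WR_FLUSH_ERR
 * completions in software for every work request still outstanding on
 * the QPs attached to this CQ.
 */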
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

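/*
 * Consume one CQE and fill in *wc.  Returns 0 on success and -EAGAIN
 * when the CQ is empty.  A RESIZE CQE triggers the buffer switch left
 * pending by mlx4_ib_resize_cq() and is then skipped.  *cur_qp caches
 * the QP of the previous CQE to avoid repeated lookups when draining
 * a burst of completions from the same QP.
 */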
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

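/*
 * Request a completion notification: ring the CQ doorbell so the
 * hardware raises an event on the next completion (or the next
 * solicited completion, depending on the flags).
 */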
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

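/*
 * Remove all CQEs that belong to a QP being moved to RESET or
 * destroyed.  The __ variant expects cq->lock to already be held;
 * mlx4_ib_cq_clean() below takes it.
 */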
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}