/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

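/*
 * A CQE is in software ownership when its owner bit matches the
 * current "lap" of the consumer index around the ring: bit
 * (ibcq.cqe + 1) of n flips every time the index wraps, so a
 * mismatch means the HCA has not yet written this entry.  With
 * 64-byte CQEs the ownership byte lives in the second half of the
 * entry, hence the tcqe adjustment.
 */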
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

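/*
 * Kernel-owned CQ buffers are sized by the device's native CQE
 * stride (32 or 64 bytes, from caps.cqe_size) and backed by an MTT
 * so the HCA can DMA completions into them.
 */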
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

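/*
 * For userspace CQs the buffer is allocated by the consumer; pin it
 * with ib_umem_get() and build an MTT over its pages so the HCA can
 * write CQEs directly into user memory.
 */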
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

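/*
 * One extra CQE is reserved so the ring can never be completely
 * full, then the total is rounded up to a power of two; ibcq.cqe
 * therefore doubles as the ring index mask.
 */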
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0, 0);
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

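/*
 * Resizing uses a second buffer (and, for userspace CQs, a second
 * umem) that is swapped in once the firmware acknowledges the
 * RESIZE operation.  Only one resize may be in flight per CQ.
 */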
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

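/*
 * Count the CQEs the hardware has produced but software has not yet
 * polled; the new ring must be able to hold at least this many.
 */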
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

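/*
 * Migrate unpolled CQEs from the old ring into the resize buffer.
 * The walk stops at the special RESIZE CQE the hardware writes when
 * it switches rings, and each copied entry gets its owner bit
 * recomputed for its position in the new ring.
 */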
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

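/*
 * Resize flow: allocate the new buffer, ask the firmware to switch
 * rings with mlx4_cq_resize(), then (for kernel CQs) copy any CQEs
 * still outstanding under cq->lock before freeing the old buffer.
 */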
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

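/*
 * Hardware validates the checksum only for non-fragmented IPv4
 * packets without IP options that carry TCP or UDP; the packet is
 * good when IPOK is set and the 16-bit checksum field is 0xffff.
 */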
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

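/*
 * Under SR-IOV, special QP traffic is tunnelled through proxy QPs.
 * The real completion metadata (pkey index, source QP, SLID/SMAC,
 * SL/VLAN) travels in a mlx4_ib_proxy_sqp_hdr prepended to the
 * received data, so recover the work completion fields from there.
 */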
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}

	return 0;
}

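/*
 * Consume one CQE and translate it into an ib_wc.  Returns 0 on
 * success, -EAGAIN when the CQ is empty, or a negative error code
 * on a malformed entry.  *cur_qp caches the QP of the previous CQE
 * so the QP table lookup is skipped for back-to-back completions on
 * the same QP.
 */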
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
		if (unlikely(!msrq)) {
			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
				cq->mcq.cqn, srq_num);
			return -EINVAL;
		}
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail,
						       cqe, is_eth);
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_VLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

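/*
 * Drain up to num_entries completions under cq->lock, then publish
 * the new consumer index to hardware in one doorbell write.
 */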
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

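/*
 * Request a completion notification: ring the arm doorbell so the
 * HCA generates an event on the next CQE (or next solicited CQE
 * when IB_CQ_SOLICITED is requested).
 */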
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

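/*
 * Remove all CQEs belonging to QP qpn, compacting the remaining
 * entries toward the producer.  The caller must hold cq->lock;
 * mlx4_ib_cq_clean() below is the locking wrapper.
 */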
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}