/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int srq_signature;

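/*
 * Return the address of WQE number @n in the SRQ buffer; every WQE is
 * 1 << wqe_shift bytes, so the byte offset is simply n << wqe_shift.
 */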
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

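/*
 * Translate a core SRQ event into an IB event and forward it to the
 * consumer's event handler, if one is registered. Limit-reached and
 * catastrophic-error events are reported; any other event type is logged
 * and dropped.
 */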
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

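/*
 * Set up a userspace-owned SRQ: copy and validate the create request from
 * udata, pin the user buffer with ib_umem_get(), translate it into the page
 * list used by the firmware command, and map the user doorbell record.
 */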
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}

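/*
 * Set up a kernel-owned SRQ: allocate the doorbell record and the WQE
 * buffer, link every WQE into the free list, build the page list for the
 * firmware command, and allocate the wrid array used to track posted
 * receives.
 */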
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

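/* Tear down what create_srq_user()/create_srq_kernel() set up. */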
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}

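/*
 * Create an SRQ. The WQE size is derived from the requested max_sge and
 * rounded up to a power of two of at least 32 bytes, and the queue depth
 * is rounded up to a power of two with one extra entry, so the max_wr
 * reported back to the caller is srq->msrq.max - 1. Once the buffer is set
 * up in user or kernel space, the SRQ is created in firmware through
 * mlx5_core_create_srq().
 */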
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_srq_attr in = {0};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	in.type = init_attr->srq_type;
	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
	if (init_attr->srq_type == IB_SRQT_XRC) {
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
	}

	in.pd = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

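/*
 * Only arming the SRQ limit (IB_SRQ_LIMIT) is supported; resizing
 * (IB_SRQ_MAX_WR) fails with -EINVAL.
 */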
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

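/*
 * Query the current SRQ limit from firmware and return it together with
 * the software-tracked max_wr and max_sge values.
 */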
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

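/*
 * Destroy the SRQ in firmware first, then release either the user doorbell
 * mapping and umem or the kernel buffers, depending on who owns the queue.
 */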
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}

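/*
 * Return a completed WQE to the SRQ free list by linking it in after the
 * current tail.
 */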
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

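/*
 * Post a chain of receive WRs to the SRQ. Each WR consumes one free WQE;
 * the scatter list is written into the WQE in big-endian hardware format
 * and, if there is room, terminated with an MLX5_INVALID_LKEY sentinel
 * entry. The doorbell record is updated once for the whole chain, after a
 * wmb(). Posting fails with -EIO while the device is in internal-error
 * state.
 */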
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}