/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be32 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};

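/*
 * Return a pointer to WQE number n in the SRQ buffer.  The buffer is
 * either one contiguous (direct) allocation or a list of pages; in the
 * indirect case the page is found first and then the offset within it.
 */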
static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

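/*
 * Fill in the Tavor-format SRQ context passed to the SW2HW_SRQ
 * firmware command.  The descriptor-size part of wqe_base_ds is the
 * WQE stride in 16-byte units (1 << (wqe_shift - 4)); the UAR index
 * comes from the user context for userspace SRQs and from the
 * driver's own UAR otherwise.
 */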
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context)
{
        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (pd->ibpd.uobject)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context)
{
        int logsize;

        memset(context, 0, sizeof *context);

        logsize = long_log2(srq->max) + srq->wqe_shift;
        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (pd->ibpd.uobject)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

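/*
 * Allocate the WQE buffer and wrid array for a kernel SRQ and link
 * all WQEs into the free list.  Userspace SRQs allocate and manage
 * their own buffer, so there is nothing to do for them here.
 */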
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (pd->ibpd.uobject)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }

        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentry value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                wqe = get_wqe(srq, i);

                *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        u8 status;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_sg)
                return -EINVAL;

        srq->max      = attr->max_wr;
        srq->max_gs   = attr->max_sge;
        srq->counter  = 0;

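        /*
         * Mem-free HCAs round the number of WQEs up to a power of two
         * and set aside one extra WQE that is never reported back to
         * the consumer (hence the "srq->max - 1" in max_wr below).
         */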
        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);

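        /*
         * Size each WQE to hold a next segment plus max_gs scatter
         * entries, rounded up to a power of two (at least 64 bytes) so
         * that WQEs can be located by shifting with wqe_shift.
         */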
        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));
        srq->wqe_shift = long_log2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!pd->ibpd.uobject) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        atomic_set(&srq->refcount, 1);
        init_waitqueue_head(&srq->wait);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
        if (!pd->ibpd.uobject)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!pd->ibpd.uobject && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}

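/*
 * Tear down an SRQ in roughly the reverse order of creation: hand
 * ownership back to software with HW2SW_SRQ, remove the SRQ from the
 * lookup table, wait for outstanding references to drop, then free
 * the buffer, doorbell record and SRQ number.
 */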
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        spin_unlock_irq(&dev->srq_table.lock);

        atomic_dec(&srq->refcount);
        wait_event(srq->wait, !atomic_read(&srq->refcount));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret;
        u8 status;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
                if (ret)
                        return ret;
                if (status)
                        return -EINVAL;
        }

        return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        u8 status;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                /* limit_watermark is big-endian in the SRQ context */
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else
                srq_attr->srq_limit = 0;

        srq_attr->max_wr  = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}

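/*
 * Called from the event queue handler to dispatch an asynchronous SRQ
 * event (e.g. the SRQ limit being reached) to the consumer's event
 * handler.  A reference is taken under the table lock so the SRQ
 * cannot be freed while the handler runs.
 */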
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                atomic_inc(&srq->refcount);
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        if (atomic_dec_and_test(&srq->refcount))
                wake_up(&srq->wait);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        if (likely(srq->first_free >= 0))
                *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
        else
                srq->first_free = ind;

        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}

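/*
 * Post a list of receive work requests to a Tavor SRQ.  WQEs are taken
 * from the free list and chained onto the previously posted WQE; the
 * receive doorbell is rung every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs
 * (and once more at the end), since a single doorbell can only
 * describe a limited number of WQEs.
 */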
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        __be32 doorbell[2];
        unsigned long flags;
        int err = 0;
        int first_ind;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&srq->lock, flags);

        first_ind = srq->first_free;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
                        doorbell[1] = cpu_to_be32(srq->srqn << 8);

                        /*
                         * Make sure that descriptors are written
                         * before doorbell is rung.
                         */
                        wmb();

                        mthca_write64(doorbell,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        first_ind = srq->first_free;
                }

                ind = srq->first_free;

                if (ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (next_ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                prev_wqe  = srq->last;
                srq->last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs) {
                        ((struct mthca_data_seg *) wqe)->byte_count = 0;
                        ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                        ((struct mthca_data_seg *) wqe)->addr = 0;
                }

                ((struct mthca_next_seg *) prev_wqe)->nda_op =
                        cpu_to_be32((ind << srq->wqe_shift) | 1);
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
                doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

                /*
                 * Make sure that descriptors are written before
                 * doorbell is rung.
                 */
                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

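/*
 * Post a list of receive work requests to an Arbel (mem-free) SRQ.
 * Instead of ringing a doorbell register, the WQE counter in the
 * doorbell record (*srq->db) is updated once after all WQEs have been
 * written, so no per-batch doorbell is needed.
 */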
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ind = srq->first_free;

                if (ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe      = get_wqe(srq, ind);
                next_ind = *wqe_to_link(wqe);

                if (next_ind < 0) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                ((struct mthca_next_seg *) wqe)->nda_op =
                        cpu_to_be32((next_ind << srq->wqe_shift) | 1);
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs) {
                        ((struct mthca_data_seg *) wqe)->byte_count = 0;
                        ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                        ((struct mthca_data_seg *) wqe)->addr = 0;
                }

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                srq->counter += nreq;

                /*
                 * Make sure that descriptors are written before
                 * we write doorbell record.
                 */
                wmb();
                *srq->db = cpu_to_be32(srq->counter);
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

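/*
 * Driver init/cleanup for the SRQ table: an allocator for SRQ numbers
 * and an array mapping SRQ numbers back to mthca_srq structures.  Both
 * are no-ops if the HCA does not support SRQs.
 */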
int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
        int err;

        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return 0;

        spin_lock_init(&dev->srq_table.lock);

        err = mthca_alloc_init(&dev->srq_table.alloc,
                               dev->limits.num_srqs,
                               dev->limits.num_srqs - 1,
                               dev->limits.reserved_srqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->srq_table.srq,
                               dev->limits.num_srqs);
        if (err)
                mthca_alloc_cleanup(&dev->srq_table.alloc);

        return err;
}

void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return;

        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
}