/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 * These are the IB spec's exponential timeout values, rounded up to
 * whole milliseconds (code 0 is the largest, 655.36 ms; code 1 the
 * smallest, 0.01 ms).
 */
const u32 ib_ipath_rnr_table[32] = {
	656,			/* 0 */
	1,			/* 1 */
	1,			/* 2 */
	1,			/* 3 */
	1,			/* 4 */
	1,			/* 5 */
	1,			/* 6 */
	1,			/* 7 */
	1,			/* 8 */
	1,			/* 9 */
	1,			/* A */
	1,			/* B */
	1,			/* C */
	1,			/* D */
	2,			/* E */
	2,			/* F */
	3,			/* 10 */
	4,			/* 11 */
	6,			/* 12 */
	8,			/* 13 */
	11,			/* 14 */
	16,			/* 15 */
	21,			/* 16 */
	31,			/* 17 */
	41,			/* 18 */
	62,			/* 19 */
	82,			/* 1A */
	123,			/* 1B */
	164,			/* 1C */
	246,			/* 1D */
	328,			/* 1E */
	492			/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

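		/*
		 * Entries on rnrwait are delta-encoded: each QP's
		 * s_rnr_timeout is stored relative to the entries ahead
		 * of it, so only the head entry needs to be decremented
		 * as time passes.  Walk forward, converting our absolute
		 * timeout into a delta, to find the insertion point.
		 */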
		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Called at interrupt level with the QP r_rq.lock held.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	int ret;

	if (!qp->ibqp.srq) {
		rq = &qp->r_rq;
		if (unlikely(rq->tail == rq->head)) {
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, rq->tail);
		qp->r_wr_id = wqe->wr_id;
		if (!wr_id_only) {
			qp->r_sge.sge = wqe->sg_list[0];
			qp->r_sge.sg_list = wqe->sg_list + 1;
			qp->r_sge.num_sge = wqe->num_sge;
			qp->r_len = wqe->length;
		}
		if (++rq->tail >= rq->size)
			rq->tail = 0;
		ret = 1;
		goto bail;
	}

	srq = to_isrq(qp->ibqp.srq);
	rq = &srq->rq;
	spin_lock(&rq->lock);
	if (unlikely(rq->tail == rq->head)) {
		spin_unlock(&rq->lock);
		ret = 0;
		goto bail;
	}
	wqe = get_rwqe_ptr(rq, rq->tail);
	qp->r_wr_id = wqe->wr_id;
	if (!wr_id_only) {
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->num_sge;
		qp->r_len = wqe->length;
	}
	if (++rq->tail >= rq->size)
		rq->tail = 0;
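	/*
	 * If the consumer armed an SRQ limit, count the entries still
	 * queued and post IB_EVENT_SRQ_LIMIT_REACHED once the count
	 * drops below the limit.  The limit is disarmed (set to 0)
	 * before the event fires so the handler can re-arm it.
	 */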
	if (srq->ibsrq.event_handler) {
		struct ib_event ev;
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			srq->limit = 0;
			spin_unlock(&rq->lock);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock(&rq->lock);
	} else
		spin_unlock(&rq->lock);
	ret = 1;

bail:
	return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_ruc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
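			/* A retry count of 7 means "retry forever". */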
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = sqp->ibqp.qp_num;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(sqp, &wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		sdata = wqe->wr.wr.atomic.swap;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
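		/* Return the original value to the requester's buffer. */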
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}

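	/*
	 * Copy the payload from the sender's SGE list into the
	 * receiver's, advancing through each SGE and, for large
	 * memory regions, through the MR's segment map as each
	 * piece is consumed.
	 */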
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp_num = sqp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

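	/*
	 * Queue the QP on the device's PIO-wait list at most once, so
	 * it can be rescheduled from ipath_ib_piobufavail() when
	 * buffers free up.
	 */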
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as ipath_layer_want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not run us again until the current
	 * invocation returns.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	ipath_layer_want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
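	/*
	 * The range checks below rely on the IB_WR_* opcode enum
	 * ordering, in which IB_WR_RDMA_READ and the two atomic
	 * opcodes sort after the send and RDMA write opcodes.
	 */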
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
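	/*
	 * RDMA read responses and atomic results are written back into
	 * the local buffers, so their lkeys need IB_ACCESS_LOCAL_WRITE.
	 */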
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
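	/*
	 * paylen is the number of bytes that follow the GRH, including
	 * the ICRC.  hwords still counts the two-word LRH but not the
	 * GRH itself at this point, hence the "- 2".
	 */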
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

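	/*
	 * The IPATH_S_BUSY bit serializes the send engine: only one
	 * instance of this tasklet may work on the QP at a time.
	 */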
	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid ==
		     ipath_layer_get_lid(dev->dd))) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority than sending requests. */
	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
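	/*
	 * The payload is rounded up to a multiple of four bytes; the
	 * pad count ends up in bits 20-21 of the first BTH word below.
	 */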
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPS_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPS_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
bail:
	return;
}