blob: 52caa2edf5a4068be02162390c4ad6952dcc3a89 [file] [log] [blame]
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33
34#include "ipath_verbs.h"
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -070035#include "ipath_kernel.h"
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -080036
/*
 * Cut down ridiculously long InfiniBand RC opcode macro names:
 * OP(FOO) expands to IB_OPCODE_RC_FOO.
 */
#define OP(x) IB_OPCODE_RC_##x
39
40/**
41 * ipath_init_restart- initialize the qp->s_sge after a restart
42 * @qp: the QP who's SGE we're restarting
43 * @wqe: the work queue to initialize the QP's SGE from
44 *
Bryan O'Sullivan12eef412006-07-01 04:36:10 -070045 * The QP s_lock should be held and interrupts disabled.
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -080046 */
47static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
48{
49 struct ipath_ibdev *dev;
50 u32 len;
51
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -070052 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -080053 ib_mtu_enum_to_int(qp->path_mtu);
54 qp->s_sge.sge = wqe->sg_list[0];
55 qp->s_sge.sg_list = wqe->sg_list + 1;
56 qp->s_sge.num_sge = wqe->wr.num_sge;
57 ipath_skip_sge(&qp->s_sge, len);
58 qp->s_len = wqe->length - len;
59 dev = to_idev(qp->ibqp.device);
60 spin_lock(&dev->pending_lock);
Bryan O'Sullivan94b8d9f2006-05-23 11:32:32 -070061 if (list_empty(&qp->timerwait))
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -080062 list_add_tail(&qp->timerwait,
63 &dev->pending[dev->pending_index]);
64 spin_unlock(&dev->pending_lock);
65}
66
/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return bth0 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
u32 ipath_make_rc_ack(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu)
{
	u32 hwords;
	u32 len;
	u32 bth0;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	/*
	 * Send a response.  Note that we are in the responder's
	 * side of the QP context.
	 */
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_REQUEST):
		/*
		 * Start returning RDMA read data: FIRST if it needs more
		 * than one packet, ONLY if a single packet suffices.
		 */
		qp->s_cur_sge = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu) {
			len = pmtu;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
		} else
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
		qp->s_rdma_len -= len;
		/* The (just updated) response opcode goes in BTH word 0. */
		bth0 = qp->s_ack_state << 24;
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* Continue the read response; LAST carries the AETH. */
		qp->s_cur_sge = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		/*
		 * We have to prevent new requests from changing
		 * the r_sge state while a ipath_verbs_send()
		 * is in progress.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = 0;	/* nothing constructed */
		goto bail;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/* Atomic response: ACK carrying the original data. */
		qp->s_cur_sge = NULL;
		len = 0;
		/*
		 * Set the s_ack_state so the receive interrupt handler
		 * won't try to send an ACK (out of order) until this one
		 * is actually sent.
		 */
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
		ohdr->u.at.aeth = ipath_compute_aeth(qp);
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
		hwords += sizeof(ohdr->u.at) / 4;
		break;

	default:
		/* Send a regular ACK. */
		qp->s_cur_sge = NULL;
		len = 0;
		/*
		 * Set the s_ack_state so the receive interrupt handler
		 * won't try to send an ACK (out of order) until this one
		 * is actually sent.
		 */
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		bth0 = OP(ACKNOWLEDGE) << 24;
		/* A pending NAK state is encoded into the AETH. */
		if (qp->s_nak_state)
			ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
						   (qp->s_nak_state <<
						    IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;

bail:
	return bth0;
}
175
/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 * @bth0p: pointer to the BTH opcode word
 * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held and interrupts disabled.
 */
int ipath_make_rc_req(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	char newreq;

	/* Nothing to do if the QP can't send or is in an RNR wait. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout)
		goto done;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		/* Stall; update_last_psn() reschedules us when ACKed. */
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		goto done;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto done;
			/* New request: it starts at the next PSN. */
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			/* lpsn = PSN of the last packet of this request. */
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			/* Bit 23 of bth0 is the solicited-event bit. */
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			/* Build the RETH describing the remote buffer. */
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			/* The request itself carries no payload. */
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
				qp->s_state = OP(COMPARE_SWAP);
			else
				qp->s_state = OP(FETCH_ADD);
			/* Build the AtomicETH for the remote operation. */
			ohdr->u.atomic_eth.vaddr = cpu_to_be64(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			ohdr->u.atomic_eth.swap_data = cpu_to_be64(
				wqe->wr.wr.atomic.swap);
			ohdr->u.atomic_eth.compare_data = cpu_to_be64(
				wqe->wr.wr.atomic.compare_add);
			hwords += sizeof(struct ib_atomic_eth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/* Atomics occupy a single PSN. */
				wqe->lpsn = wqe->psn;
			}
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			ss = NULL;
			len = 0;
			break;

		default:
			goto done;
		}
		/* Commit the new WQE to the send queue. */
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry.  More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/* Still more than one packet left: stay MIDDLE. */
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if a RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if a RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* Re-issue the read starting at the unACKed offset. */
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / 4;
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * We shouldn't start anything new until this request is
		 * finished.  The ACK will handle rescheduling us.  XXX The
		 * number of outstanding ones is negotiated at connection
		 * setup time (see pg. 258,289)?  XXX Also, if we support
		 * multiple outstanding requests, we need to check the WQE
		 * IB_SEND_FENCE flag and not send a new request if a RDMA
		 * read or atomic is pending.
		 */
		goto done;
	}
	/* Ask for an ACK when we approach the unACKed-packet limit. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	*bth0p = bth0 | (qp->s_state << 24);
	*bth2p = bth2;
	return 1;

done:
	return 0;
}
514
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800515/**
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700516 * send_rc_ack - Construct an ACK packet and send it
517 * @qp: a pointer to the QP
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800518 *
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700519 * This is called from ipath_rc_rcv() and only uses the receive
520 * side QP state.
521 * Note that RDMA reads are handled in the send side QP state and tasklet.
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800522 */
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800523static void send_rc_ack(struct ipath_qp *qp)
524{
525 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
526 u16 lrh0;
527 u32 bth0;
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700528 u32 hwords;
529 struct ipath_ib_header hdr;
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800530 struct ipath_other_headers *ohdr;
531
532 /* Construct the header. */
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700533 ohdr = &hdr.u.oth;
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700534 lrh0 = IPATH_LRH_BTH;
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800535 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700536 hwords = 6;
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800537 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700538 hwords += ipath_make_grh(dev, &hdr.u.l.grh,
539 &qp->remote_ah_attr.grh,
540 hwords, 0);
541 ohdr = &hdr.u.l.oth;
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700542 lrh0 = IPATH_LRH_GRH;
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800543 }
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700544 /* read pkey_index w/o lock (its atomic) */
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -0700545 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700546 if (qp->r_nak_state)
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700547 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700548 (qp->r_nak_state <<
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700549 IPATH_AETH_CREDIT_SHIFT));
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700550 else
551 ohdr->u.aeth = ipath_compute_aeth(qp);
552 if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
553 bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
554 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700555 hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
556 } else
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800557 bth0 |= OP(ACKNOWLEDGE) << 24;
558 lrh0 |= qp->remote_ah_attr.sl << 4;
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700559 hdr.lrh[0] = cpu_to_be16(lrh0);
560 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
561 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -0700562 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800563 ohdr->bth[0] = cpu_to_be32(bth0);
564 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700565 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800566
567 /*
568 * If we can send the ACK, clear the ACK state.
569 */
Bryan O'Sullivanddd4bb22006-07-01 04:35:50 -0700570 if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700571 qp->r_ack_state = OP(ACKNOWLEDGE);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800572 dev->n_unicast_xmit++;
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700573 } else {
574 /*
575 * We are out of PIO buffers at the moment.
576 * Pass responsibility for sending the ACK to the
577 * send tasklet so that when a PIO buffer becomes
578 * available, the ACK is sent ahead of other outgoing
579 * packets.
580 */
581 dev->n_rc_qacks++;
582 spin_lock_irq(&qp->s_lock);
583 /* Don't coalesce if a RDMA read or atomic is pending. */
584 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
585 qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
586 qp->s_ack_state = qp->r_ack_state;
587 qp->s_nak_state = qp->r_nak_state;
588 qp->s_ack_psn = qp->r_ack_psn;
589 qp->r_ack_state = OP(ACKNOWLEDGE);
590 }
591 spin_unlock_irq(&qp->s_lock);
592
593 /* Call ipath_do_rc_send() in another thread. */
594 tasklet_hi_schedule(&qp->s_task);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800595 }
596}
597
598/**
Bryan O'Sullivan6700efd2006-07-01 04:35:51 -0700599 * reset_psn - reset the QP state to send starting from PSN
600 * @qp: the QP
601 * @psn: the packet sequence number to restart at
602 *
603 * This is called from ipath_rc_rcv() to process an incoming RC ACK
604 * for the given QP.
605 * Called at interrupt level with the QP s_lock held.
606 */
607static void reset_psn(struct ipath_qp *qp, u32 psn)
608{
609 u32 n = qp->s_last;
610 struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
611 u32 opcode;
612
613 qp->s_cur = n;
614
615 /*
616 * If we are starting the request from the beginning,
617 * let the normal send code handle initialization.
618 */
619 if (ipath_cmp24(psn, wqe->psn) <= 0) {
620 qp->s_state = OP(SEND_LAST);
621 goto done;
622 }
623
624 /* Find the work request opcode corresponding to the given PSN. */
625 opcode = wqe->wr.opcode;
626 for (;;) {
627 int diff;
628
629 if (++n == qp->s_size)
630 n = 0;
631 if (n == qp->s_tail)
632 break;
633 wqe = get_swqe_ptr(qp, n);
634 diff = ipath_cmp24(psn, wqe->psn);
635 if (diff < 0)
636 break;
637 qp->s_cur = n;
638 /*
639 * If we are starting the request from the beginning,
640 * let the normal send code handle initialization.
641 */
642 if (diff == 0) {
643 qp->s_state = OP(SEND_LAST);
644 goto done;
645 }
646 opcode = wqe->wr.opcode;
647 }
648
649 /*
650 * Set the state to restart in the middle of a request.
651 * Don't change the s_sge, s_cur_sge, or s_cur_size.
652 * See ipath_do_rc_send().
653 */
654 switch (opcode) {
655 case IB_WR_SEND:
656 case IB_WR_SEND_WITH_IMM:
657 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
658 break;
659
660 case IB_WR_RDMA_WRITE:
661 case IB_WR_RDMA_WRITE_WITH_IMM:
662 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
663 break;
664
665 case IB_WR_RDMA_READ:
666 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
667 break;
668
669 default:
670 /*
671 * This case shouldn't happen since its only
672 * one PSN per req.
673 */
674 qp->s_state = OP(SEND_LAST);
675 }
676done:
677 qp->s_psn = psn;
678}
679
/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	/*
	 * If there are no requests pending, we are done.
	 */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
	    qp->s_last == qp->s_tail)
		goto done;

	/*
	 * Retries exhausted: report a retry-exceeded completion and put
	 * the QP into the send-queue error state.
	 */
	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp_num = qp->ibqp.qp_num;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/* Count resent packets (one per PSN, except reads count once). */
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

	/* Rewind the send state machine to the given PSN. */
	reset_psn(qp, psn);

done:
	/* Kick the send tasklet to (re)start transmitting. */
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
741
Bryan O'Sullivan60229432006-09-28 08:59:57 -0700742static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
743{
744 if (qp->s_wait_credit) {
745 qp->s_wait_credit = 0;
746 tasklet_hi_schedule(&qp->s_task);
747 }
748 qp->s_last_psn = psn;
749}
750
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800751/**
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800752 * do_rc_ack - process an incoming RC ACK
753 * @qp: the QP the ACK came in on
754 * @psn: the packet sequence number of the ACK
755 * @opcode: the opcode of the request that resulted in the ACK
756 *
Bryan O'Sullivan6700efd2006-07-01 04:35:51 -0700757 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800758 * for the given QP.
Bryan O'Sullivan12eef412006-07-01 04:36:10 -0700759 * Called at interrupt level with the QP s_lock held and interrupts disabled.
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800760 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
761 */
762static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
763{
764 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
765 struct ib_wc wc;
766 struct ipath_swqe *wqe;
767 int ret = 0;
768
769 /*
770 * Remove the QP from the timeout queue (or RNR timeout queue).
771 * If ipath_ib_timer() has already removed it,
772 * it's OK since we hold the QP s_lock and ipath_restart_rc()
773 * just won't find anything to restart if we ACK everything.
774 */
775 spin_lock(&dev->pending_lock);
Bryan O'Sullivan94b8d9f2006-05-23 11:32:32 -0700776 if (!list_empty(&qp->timerwait))
777 list_del_init(&qp->timerwait);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800778 spin_unlock(&dev->pending_lock);
779
780 /*
781 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
782 * requests and implicitly NAK RDMA read and atomic requests issued
783 * before the NAK'ed request. The MSN won't include the NAK'ed
784 * request but will include an ACK'ed request(s).
785 */
786 wqe = get_swqe_ptr(qp, qp->s_last);
787
788 /* Nothing is pending to ACK/NAK. */
789 if (qp->s_last == qp->s_tail)
790 goto bail;
791
792 /*
793 * The MSN might be for a later WQE than the PSN indicates so
794 * only complete WQEs that the PSN finishes.
795 */
796 while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
797 /* If we are ACKing a WQE, the MSN should be >= the SSN. */
798 if (ipath_cmp24(aeth, wqe->ssn) < 0)
799 break;
800 /*
801 * If this request is a RDMA read or atomic, and the ACK is
802 * for a later operation, this ACK NAKs the RDMA read or
803 * atomic. In other words, only a RDMA_READ_LAST or ONLY
804 * can ACK a RDMA read and likewise for atomic ops. Note
805 * that the NAK case can only happen if relaxed ordering is
806 * used and requests are sent after an RDMA read or atomic
807 * is sent but before the response is received.
808 */
809 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
810 opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
811 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
812 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
813 (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
814 ipath_cmp24(wqe->psn, psn) != 0))) {
815 /*
816 * The last valid PSN seen is the previous
817 * request's.
818 */
Bryan O'Sullivan60229432006-09-28 08:59:57 -0700819 update_last_psn(qp, wqe->psn - 1);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800820 /* Retry this request. */
821 ipath_restart_rc(qp, wqe->psn, &wc);
822 /*
823 * No need to process the ACK/NAK since we are
824 * restarting an earlier request.
825 */
826 goto bail;
827 }
828 /* Post a send completion queue entry if requested. */
829 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
830 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
831 wc.wr_id = wqe->wr.wr_id;
832 wc.status = IB_WC_SUCCESS;
833 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
834 wc.vendor_err = 0;
835 wc.byte_len = wqe->length;
836 wc.qp_num = qp->ibqp.qp_num;
837 wc.src_qp = qp->remote_qpn;
838 wc.pkey_index = 0;
839 wc.slid = qp->remote_ah_attr.dlid;
840 wc.sl = qp->remote_ah_attr.sl;
841 wc.dlid_path_bits = 0;
842 wc.port_num = 0;
843 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
844 }
845 qp->s_retry = qp->s_retry_cnt;
846 /*
847 * If we are completing a request which is in the process of
848 * being resent, we can stop resending it since we know the
849 * responder has already seen it.
850 */
851 if (qp->s_last == qp->s_cur) {
852 if (++qp->s_cur >= qp->s_size)
853 qp->s_cur = 0;
854 wqe = get_swqe_ptr(qp, qp->s_cur);
855 qp->s_state = OP(SEND_LAST);
856 qp->s_psn = wqe->psn;
857 }
858 if (++qp->s_last >= qp->s_size)
859 qp->s_last = 0;
860 wqe = get_swqe_ptr(qp, qp->s_last);
861 if (qp->s_last == qp->s_tail)
862 break;
863 }
864
865 switch (aeth >> 29) {
866 case 0: /* ACK */
867 dev->n_rc_acks++;
868 /* If this is a partial ACK, reset the retransmit timer. */
869 if (qp->s_last != qp->s_tail) {
870 spin_lock(&dev->pending_lock);
871 list_add_tail(&qp->timerwait,
872 &dev->pending[dev->pending_index]);
873 spin_unlock(&dev->pending_lock);
874 }
875 ipath_get_credit(qp, aeth);
876 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
877 qp->s_retry = qp->s_retry_cnt;
Bryan O'Sullivan60229432006-09-28 08:59:57 -0700878 update_last_psn(qp, psn);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800879 ret = 1;
880 goto bail;
881
882 case 1: /* RNR NAK */
883 dev->n_rnr_naks++;
884 if (qp->s_rnr_retry == 0) {
885 if (qp->s_last == qp->s_tail)
886 goto bail;
887
888 wc.status = IB_WC_RNR_RETRY_EXC_ERR;
889 goto class_b;
890 }
891 if (qp->s_rnr_retry_cnt < 7)
892 qp->s_rnr_retry--;
893 if (qp->s_last == qp->s_tail)
894 goto bail;
895
Bryan O'Sullivan6700efd2006-07-01 04:35:51 -0700896 /* The last valid PSN is the previous PSN. */
Bryan O'Sullivan60229432006-09-28 08:59:57 -0700897 update_last_psn(qp, psn - 1);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800898
899 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
900
Bryan O'Sullivan6700efd2006-07-01 04:35:51 -0700901 reset_psn(qp, psn);
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800902
903 qp->s_rnr_timeout =
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700904 ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
905 IPATH_AETH_CREDIT_MASK];
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800906 ipath_insert_rnr_queue(qp);
907 goto bail;
908
909 case 3: /* NAK */
910 /* The last valid PSN seen is the previous request's. */
911 if (qp->s_last != qp->s_tail)
Bryan O'Sullivan60229432006-09-28 08:59:57 -0700912 update_last_psn(qp, wqe->psn - 1);
Bryan O'Sullivan27b678d2006-07-01 04:36:17 -0700913 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
914 IPATH_AETH_CREDIT_MASK) {
Bryan O'Sullivan97f9efb2006-03-29 15:23:35 -0800915 case 0: /* PSN sequence error */
916 dev->n_seq_naks++;
917 /*
918 * Back up to the responder's expected PSN. XXX
919 * Note that we might get a NAK in the middle of an
920 * RDMA READ response which terminates the RDMA
921 * READ.
922 */
923 if (qp->s_last == qp->s_tail)
924 break;
925
926 if (ipath_cmp24(psn, wqe->psn) < 0)
927 break;
928
929 /* Retry the request. */
930 ipath_restart_rc(qp, psn, &wc);
931 break;
932
933 case 1: /* Invalid Request */
934 wc.status = IB_WC_REM_INV_REQ_ERR;
935 dev->n_other_naks++;
936 goto class_b;
937
938 case 2: /* Remote Access Error */
939 wc.status = IB_WC_REM_ACCESS_ERR;
940 dev->n_other_naks++;
941 goto class_b;
942
943 case 3: /* Remote Operation Error */
944 wc.status = IB_WC_REM_OP_ERR;
945 dev->n_other_naks++;
946 class_b:
947 wc.wr_id = wqe->wr.wr_id;
948 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
949 wc.vendor_err = 0;
950 wc.byte_len = 0;
951 wc.qp_num = qp->ibqp.qp_num;
952 wc.src_qp = qp->remote_qpn;
953 wc.pkey_index = 0;
954 wc.slid = qp->remote_ah_attr.dlid;
955 wc.sl = qp->remote_ah_attr.sl;
956 wc.dlid_path_bits = 0;
957 wc.port_num = 0;
958 ipath_sqerror_qp(qp, &wc);
959 break;
960
961 default:
962 /* Ignore other reserved NAK error codes */
963 goto reserved;
964 }
965 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
966 goto bail;
967
968 default: /* 2: reserved */
969 reserved:
970 /* Ignore reserved NAK codes. */
971 goto bail;
972 }
973
974bail:
975 return ret;
976}
977
/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet (ACK, atomic ACK, or RDMA read response) for the given QP,
 * for which this QP is the requester.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;

	/* The QP's send state (s_*) is protected by s_lock. */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses (PSN beyond anything we have sent). */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			/* Top 3 AETH bits == 0 means this is an ACK. */
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		/*
		 * Store the atomic result where the original work request
		 * points.  NOTE(review): this is a raw 64-bit copy with no
		 * be64_to_cpu() — presumably byte order is handled elsewhere
		 * or host order matches the wire here; confirm.
		 */
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			*(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
		if (!do_rc_ack(qp, aeth, psn, opcode) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		/*
		 * do_rc_ack() has already checked the PSN so skip
		 * the sequence check.
		 */
		goto rdma_read;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
	rdma_read:
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/* A FIRST/MIDDLE response must carry exactly one PMTU. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_done;
		if (unlikely(pmtu >= qp->s_len))
			goto ack_done;
		/* We got a response so update the timeout. */
		if (unlikely(qp->s_last == qp->s_tail ||
			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
			     IB_WR_RDMA_READ))
			goto ack_done;
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 * XXX Yet another place that affects relaxed RDMA order
		 * since we don't want s_sge modified.
		 */
		qp->s_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_sge, data, pmtu);
		/* s_lock already dropped; skip the unlock at ack_done. */
		goto bail;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_ONLY):
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/*
		 * Get the number of bytes the message was padded by.
		 */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8))) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_len)) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_sge, data, tlen);
		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
			/*
			 * Change the state so we continue
			 * processing new requests and wake up the
			 * tasklet if there are posted sends.
			 */
			qp->s_state = OP(SEND_LAST);
			if (qp->s_tail != qp->s_head)
				tasklet_hi_schedule(&qp->s_task);
		}
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
1144
/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *        (positive means the packet is ahead of what we expect,
 *        negative or zero means it is a duplicate)
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent and the s_lock unlocked.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ib_reth *reth;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if a RDMA read, atomic, or
		 * NAK is pending though.
		 */
		if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
		    qp->r_nak_state != 0)
			goto done;
		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
			qp->r_ack_state = OP(SEND_ONLY);
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
		}
		goto send_ack;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 */
	if (opcode == OP(RDMA_READ_REQUEST)) {
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * If we receive a duplicate RDMA request, it means the
		 * requester saw a sequence error and needs to restart
		 * from an earlier point.  We can abort the current
		 * RDMA read send in that case.
		 */
		spin_lock_irq(&qp->s_lock);
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
			/*
			 * We are already sending earlier requested data.
			 * Don't abort it to send later out of sequence data.
			 */
			spin_unlock_irq(&qp->s_lock);
			goto done;
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/*
			 * Address range must be a subset of the original
			 * request and start on pmtu boundaries.
			 */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				/* Silently drop on rkey failure (see above). */
				spin_unlock_irq(&qp->s_lock);
				goto done;
			}
		} else {
			/* Zero-length read: clear the scatter/gather state. */
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		qp->s_ack_state = opcode;
		qp->s_ack_psn = psn;
		spin_unlock_irq(&qp->s_lock);
		/* Let the send tasklet (re)generate the read response. */
		tasklet_hi_schedule(&qp->s_task);
		goto send_ack;
	}

	/*
	 * A pending RDMA read will ACK anything before it so
	 * ignore earlier duplicate requests.
	 */
	if (qp->s_ack_state != OP(ACKNOWLEDGE))
		goto done;

	/*
	 * If an ACK is pending, don't replace the pending ACK
	 * with an earlier one since the later one will ACK the earlier.
	 * Also, if we already have a pending atomic, send it.
	 */
	if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
	    (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
	     qp->r_ack_state >= OP(COMPARE_SWAP)))
		goto send_ack;
	switch (opcode) {
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Check for the PSN of the last atomic operation
		 * performed and resend the result if found.
		 */
		if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
			goto done;
		break;
	}
	qp->r_ack_state = opcode;
	qp->r_nak_state = 0;
	qp->r_ack_psn = psn;
send_ack:
	return 0;

done:
	return 1;
}
1295
/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.  Responses (where this QP is the requester) are
 * routed to ipath_rc_rcv_resp(); out-of-sequence or duplicate requests
 * to ipath_rc_rcv_error(); in-sequence requests are executed here.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) is in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		/* Out-of-sequence or duplicate request. */
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		/*
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if a RDMA read, atomic, or NAK
		 * is pending though.
		 */
		if (qp->r_ack_state >= OP(COMPARE_SWAP))
			goto send_ack;
		/* XXX Flush WQEs */
		qp->state = IB_QPS_ERR;
		qp->r_ack_state = OP(SEND_ONLY);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		/* A MIDDLE/LAST with no matching FIRST is invalid. */
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * A RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if a RDMA read or atomic
			 * is pending though.
			 */
			if (qp->r_ack_state >= OP(COMPARE_SWAP))
				goto send_ack;
			qp->r_ack_state = OP(SEND_ONLY);
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			/* Immediate data stays in network byte order. */
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		/* RDMA writes complete with no receive CQ entry. */
		if (opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_ONLY))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		/* consume RWQE */
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
			/* Zero-length write: clear the scatter/gather state. */
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(RDMA_READ_REQUEST):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		/* s_rdma_* and s_ack_* are send-side state: take s_lock. */
		spin_lock_irq(&qp->s_lock);
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				spin_unlock_irq(&qp->s_lock);
				goto nack_acc;
			}
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (qp->s_rdma_len > pmtu)
				qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
		} else {
			/* Zero-length read: clear the scatter/gather state. */
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;

		qp->s_ack_state = opcode;
		qp->s_ack_psn = psn;
		spin_unlock_irq(&qp->s_lock);

		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		u64 vaddr;
		u64 sdata;
		u32 rkey;

		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else {
			ateth = (struct ib_atomic_eth *)data;
			data += sizeof(*ateth);
		}
		vaddr = be64_to_cpu(ateth->vaddr);
		/* Target address must be 8-byte aligned. */
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/*
		 * Perform atomic OP and save result.
		 * NOTE(review): pending_lock is used here to serialize the
		 * read-modify-write — presumably shared with other atomic-op
		 * paths; confirm this covers all writers of the target.
		 */
		sdata = be64_to_cpu(ateth->swap_data);
		spin_lock_irq(&dev->pending_lock);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (opcode == OP(FETCH_ADD))
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data ==
			 be64_to_cpu(ateth->compare_data))
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irq(&dev->pending_lock);
		qp->r_msn++;
		qp->r_atomic_psn = psn & IPATH_PSN_MASK;
		/* Set bit 31 so the "ACK if requested" test below fires. */
		psn |= 1 << 31;
		break;
	}

	default:
		/* Drop packet for unknown opcodes. */
		goto done;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31)) {
		/*
		 * Coalesce ACKs unless there is a RDMA READ or
		 * ATOMIC pending.
		 */
		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
			qp->r_ack_state = opcode;
			qp->r_ack_psn = psn;
		}
		goto send_ack;
	}
	goto done;

nack_acc:
	/*
	 * A NAK will ACK earlier sends and RDMA writes.
	 * Don't queue the NAK if a RDMA read, atomic, or NAK
	 * is pending though.
	 */
	if (qp->r_ack_state < OP(COMPARE_SWAP)) {
		/* XXX Flush WQEs */
		qp->state = IB_QPS_ERR;
		qp->r_ack_state = OP(RDMA_WRITE_ONLY);
		qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
		qp->r_ack_psn = qp->r_psn;
	}
send_ack:
	/* Send ACK right away unless the send tasklet has a pending ACK. */
	if (qp->s_ack_state == OP(ACKNOWLEDGE))
		send_rc_ack(qp);

done:
	return;
}