/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
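/*
 * For example, OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST, the
 * UC variant of the IB transport opcodes (see ib_pack.h).
 */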

static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
                               struct ib_wc *wc)
{
        if (++qp->s_last == qp->s_size)
                qp->s_last = 0;
        if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
                wc->wr_id = wqe->wr.wr_id;
                wc->status = IB_WC_SUCCESS;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc->vendor_err = 0;
                wc->byte_len = wqe->length;
                wc->qp_num = qp->ibqp.qp_num;
                wc->src_qp = qp->remote_qpn;
                wc->pkey_index = 0;
                wc->slid = qp->remote_ah_attr.dlid;
                wc->sl = qp->remote_ah_attr.sl;
                wc->dlid_path_bits = 0;
                wc->port_num = 0;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
        }
}

/**
 * ipath_do_uc_send - do a send on a UC queue
 * @data: contains a pointer to the QP to send on
 *
 * Process entries in the send work queue until the queue is exhausted.
 * Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP lock, two threads could send
 * packets out of order.
 * This is similar to ipath_do_rc_send() in ipath_rc.c, except that UC
 * does not have to handle timeouts or resends.
 */
void ipath_do_uc_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe;
        unsigned long flags;
        u16 lrh0;
        u32 hwords;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u32 bth2;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        u32 len;
        struct ipath_other_headers *ohdr;
        struct ib_wc wc;

        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
                goto bail;

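        /*
         * If the destination LID matches our own, the packet never
         * touches the wire; it is handed straight to the receive side
         * by ipath_ruc_loopback().
         */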
        if (unlikely(qp->remote_ah_attr.dlid ==
                     ipath_layer_get_lid(dev->dd))) {
                /*
                 * Pass in our (uninitialized) ib_wc so that
                 * ipath_ruc_loopback() does not need to allocate one of
                 * its own on the stack.
                 */
                ipath_ruc_loopback(qp, &wc);
                clear_bit(IPATH_S_BUSY, &qp->s_flags);
                goto bail;
        }

        ohdr = &qp->s_hdr.u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                ohdr = &qp->s_hdr.u.l.oth;

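/*
 * Each pass through this loop first transmits the packet whose header
 * was built on the previous pass (if any), then constructs the header
 * for the next packet of the current (or a new) work request.
 */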
again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.
                 * An interrupt will call ipath_ib_piobufavail()
                 * when one is available.
                 */
                if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
                                     (u32 *) &qp->s_hdr,
                                     qp->s_cur_size,
                                     qp->s_cur_sge)) {
                        ipath_no_bufs_available(qp, dev);
                        goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        lrh0 = IPS_LRH_BTH;
        /* Header size in 32-bit words: LRH + BTH = (8 + 12) / 4. */
        hwords = 5;

        /*
         * The lock is needed to synchronize between
         * setting qp->s_ack_state and post_send().
         */
        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
                goto done;

        bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);

        /* Send a request. */
        wqe = get_swqe_ptr(qp, qp->s_last);
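        /*
         * qp->s_state holds the UC opcode of the previous packet we
         * built, so the switch below resumes a partially sent message
         * (SEND_FIRST/MIDDLE, RDMA_WRITE_FIRST/MIDDLE) or, in the
         * default case, starts a new work request.
         */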
        switch (qp->s_state) {
        default:
                /*
                 * Signal the completion of the last send (if there is
                 * one).
                 */
                if (qp->s_last != qp->s_tail) {
                        complete_last_send(qp, wqe, &wc);
                        /*
                         * complete_last_send() advanced qp->s_last, so
                         * refetch the WQE we will work on next.
                         */
                        wqe = get_swqe_ptr(qp, qp->s_last);
                }

                /* Check if send work queue is empty. */
                if (qp->s_tail == qp->s_head)
                        goto done;
                /*
                 * Start a new request.
                 */
                qp->s_psn = wqe->psn = qp->s_next_psn;
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_len = len = wqe->length;
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state =
                                        OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;        /* Solicited Event */
                        break;

                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / 4;
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the RETH */
                                ohdr->u.rc.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= 1 << 23;
                        }
                        break;

                default:
                        goto done;
                }
                if (++qp->s_tail >= qp->s_size)
                        qp->s_tail = 0;
                break;

        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= 1 << 23;
                break;

        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state =
                                OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;
                }
                break;
        }
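        /*
         * IB PSNs are 24 bits wide; IPS_PSN_MASK keeps the low 24 bits
         * as the counter wraps.
         */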
        bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
        qp->s_len -= len;
        bth0 |= qp->s_state << 24;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Construct the header. */
        extra_bytes = (4 - len) & 3;
        nwords = (len + extra_bytes) >> 2;
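        /*
         * Payloads are padded to a 4-byte boundary; e.g. len == 5 gives
         * extra_bytes == 3 and nwords == 2.
         */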
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                /* Header size in 32-bit words. */
                hwords += 10;
                lrh0 = IPS_LRH_GRH;
                qp->s_hdr.u.l.grh.version_tclass_flow =
                        cpu_to_be32((6 << 28) |
                                    (qp->remote_ah_attr.grh.traffic_class
                                     << 20) |
                                    qp->remote_ah_attr.grh.flow_label);
                /*
                 * PayLen covers everything after the GRH: hwords - 12
                 * is the header words remaining once the LRH (2) and
                 * GRH (10) are excluded.
                 */
                qp->s_hdr.u.l.grh.paylen =
                        cpu_to_be16(((hwords - 12) + nwords +
                                     SIZE_OF_CRC) << 2);
                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
                qp->s_hdr.u.l.grh.hop_limit =
                        qp->remote_ah_attr.grh.hop_limit;
                /* The SGID is 32-bit aligned. */
                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
                        dev->gid_prefix;
                qp->s_hdr.u.l.grh.sgid.global.interface_id =
                        ipath_layer_get_guid(dev->dd);
                qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
        }
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_cur_size = len;
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        /* DEST LID */
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
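        /* The pad count occupies bits 21:20 of the first BTH word. */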
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);

        /* Check for more work to do. */
        goto again;

done:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        clear_bit(IPATH_S_BUSY, &qp->s_flags);

bail:
        return;
}

/**
 * ipath_uc_rcv - handle an incoming UC packet
 * @dev: the device the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 psn;
        u32 pad;
        unsigned long flags;
        struct ib_wc wc;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        struct ib_reth *reth;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12;       /* LRH + BTH */
                psn = be32_to_cpu(ohdr->bth[2]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
                /*
                 * The header with GRH is 60 bytes and the core driver
                 * sets the eager header buffer size to 56 bytes, so
                 * the last 4 bytes of the BTH header (the PSN) are in
                 * the data buffer.
                 */
                header_in_data =
                        ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
                if (header_in_data) {
                        psn = be32_to_cpu(((__be32 *) data)[0]);
                        data += sizeof(__be32);
                } else
                        psn = be32_to_cpu(ohdr->bth[2]);
        }
        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;

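        /*
         * Preset the immediate-data completion fields; only the
         * *_WITH_IMMEDIATE opcodes below overwrite them.
         */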
        wc.imm_data = 0;
        wc.wc_flags = 0;

        spin_lock_irqsave(&qp->r_rq.lock, flags);

        /* Compare the PSN against the expected PSN. */
        if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
                /*
                 * Handle a sequence error.
                 * Silently drop any current message.
                 */
                qp->r_psn = psn;
        inv:
                qp->r_state = OP(SEND_LAST);
                switch (opcode) {
                case OP(SEND_FIRST):
                case OP(SEND_ONLY):
                case OP(SEND_ONLY_WITH_IMMEDIATE):
                        goto send_first;

                case OP(RDMA_WRITE_FIRST):
                case OP(RDMA_WRITE_ONLY):
                case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
                        goto rdma_first;

                default:
                        dev->n_pkt_drops++;
                        goto done;
                }
        }

        /* Check for opcode sequence errors. */
        switch (qp->r_state) {
        case OP(SEND_FIRST):
        case OP(SEND_MIDDLE):
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
                if (opcode == OP(RDMA_WRITE_MIDDLE) ||
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        default:
                if (opcode == OP(SEND_FIRST) ||
                    opcode == OP(SEND_ONLY) ||
                    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
                    opcode == OP(RDMA_WRITE_FIRST) ||
                    opcode == OP(RDMA_WRITE_ONLY) ||
                    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        break;
                goto inv;
        }

        /* OK, process the packet. */
        switch (opcode) {
        case OP(SEND_FIRST):
        case OP(SEND_ONLY):
        case OP(SEND_ONLY_WITH_IMMEDIATE):
        send_first:
                if (qp->r_reuse_sge) {
                        qp->r_reuse_sge = 0;
                        qp->r_sge = qp->s_rdma_sge;
                } else if (!ipath_get_rwqe(qp, 0)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Save the WQE so we can reuse it in case of an error. */
                qp->s_rdma_sge = qp->r_sge;
                qp->r_rcv_len = 0;
                if (opcode == OP(SEND_ONLY))
                        goto send_last;
                else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
                        goto send_last_imm;
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                /*
                 * Check for an invalid length: a middle packet must
                 * carry exactly one PMTU of payload.
                 */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                /* Check against the posted RWQE length. */
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(SEND_LAST_WITH_IMMEDIATE):
        send_last_imm:
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                /* FALLTHROUGH */
        case OP(SEND_LAST):
        send_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
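                /*
                 * The total message length is the last packet's payload
                 * plus whatever the FIRST/MIDDLE packets already
                 * delivered into r_rcv_len.
                 */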
                wc.byte_len = tlen + qp->r_rcv_len;
                if (unlikely(wc.byte_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* XXX Need to free SGEs */
        last_imm:
                ipath_copy_sge(&qp->r_sge, data, tlen);
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.opcode = IB_WC_RECV;
                wc.vendor_err = 0;
                wc.qp_num = qp->ibqp.qp_num;
                wc.src_qp = qp->remote_qpn;
                wc.pkey_index = 0;
                wc.slid = qp->remote_ah_attr.dlid;
                wc.sl = qp->remote_ah_attr.sl;
                wc.dlid_path_bits = 0;
                wc.port_num = 0;
                /* Signal completion event if the solicited bit is set. */
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                               (ohdr->bth[0] &
                                __constant_cpu_to_be32(1 << 23)) != 0);
                break;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_ONLY):
        case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
        rdma_first:
                /* RETH comes after BTH */
                if (!header_in_data)
                        reth = &ohdr->u.rc.reth;
                else {
                        reth = (struct ib_reth *)data;
                        data += sizeof(*reth);
                }
                hdrsize += sizeof(*reth);
                qp->r_len = be32_to_cpu(reth->length);
                qp->r_rcv_len = 0;
                if (qp->r_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
                        u64 vaddr = be64_to_cpu(reth->vaddr);

                        /* Check rkey */
                        if (unlikely(!ipath_rkey_ok(
                                             dev, &qp->r_sge, qp->r_len,
                                             vaddr, rkey,
                                             IB_ACCESS_REMOTE_WRITE))) {
                                dev->n_pkt_drops++;
                                goto done;
                        }
                } else {
                        /* Zero-length write: nothing to map. */
                        qp->r_sge.sg_list = NULL;
                        qp->r_sge.sge.mr = NULL;
                        qp->r_sge.sge.vaddr = NULL;
                        qp->r_sge.sge.length = 0;
                        qp->r_sge.sge.sge_length = 0;
                }
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (opcode == OP(RDMA_WRITE_ONLY))
                        goto rdma_last;
                else if (opcode ==
                         OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        goto rdma_last_imm;
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /*
                 * Check for an invalid length: a middle packet must
                 * carry exactly one PMTU of payload.
                 */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
        rdma_last_imm:
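                /*
                 * Unlike a plain RDMA write, the immediate data must be
                 * delivered in a receive completion, so this path
                 * consumes an RWQE via ipath_get_rwqe() below.
                 */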
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (qp->r_reuse_sge) {
                        qp->r_reuse_sge = 0;
                } else if (!ipath_get_rwqe(qp, 1)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.byte_len = 0;
                goto last_imm;

        case OP(RDMA_WRITE_LAST):
        rdma_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, tlen);
                break;

        default:
                /* Drop packet for unknown opcodes. */
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        qp->r_psn++;
        qp->r_state = opcode;
done:
        spin_unlock_irqrestore(&qp->r_rq.lock, flags);

bail:
        return;
}