/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;

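	/* s_lock protects the QP's send-side state and flags. */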
	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

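	/*
	 * The BTH and later headers go right after the LRH, or after
	 * the LRH+GRH when the destination address carries a GRH.
	 */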
	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
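	/*
	 * qp->s_state holds the opcode of the last packet sent; the
	 * switch below either continues segmenting the current message
	 * or, in the default case, starts a new WQE.
	 */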
	switch (qp->s_state) {
	default:
		if (!(ib_qib_state_ops[qp->state] &
		    QIB_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto bail;
		/*
		 * Start a new request.
		 */
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

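	/*
	 * The cases below continue a multi-packet message.  Only the
	 * payload follows the BTH, so hwords stays at 5 unless a LAST
	 * packet appends immediate data.
	 */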
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
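	/*
	 * Common exit: account for this packet's payload and build the
	 * LRH/BTH.  PSNs are 24 bits wide, hence QIB_PSN_MASK.
	 */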
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_next_psn++ & QIB_PSN_MASK);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	unsigned long flags;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

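	/* The opcode is carried in the high byte of bth[0]. */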
	opcode = be32_to_cpu(ohdr->bth[0]);
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		goto sunlock;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			while (qp->r_sge.num_sge) {
				atomic_dec(&qp->r_sge.sge.mr->refcount);
				if (--qp->r_sge.num_sge)
					qp->r_sge.sge = *qp->r_sge.sg_list++;
			}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

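	/*
	 * A good packet arriving while the QP is still in RTR means
	 * the connection is up; report IB_EVENT_COMM_EST exactly once.
	 */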
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/*
		 * Check for an invalid length: a middle packet must
		 * carry exactly one PMTU and fit the posted rwqe.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
last_imm:
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
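		/* Drop the MR references now that the data is copied. */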
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		wc.csum_ok = 0;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
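		/*
		 * The RETH following the BTH carries the remote vaddr,
		 * rkey, and total length of the RDMA write.
		 */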
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/*
		 * Check for an invalid length: a middle packet must
		 * carry exactly one PMTU and stay within the RETH length.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
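		/*
		 * Immediate data consumes an RWQE for the completion:
		 * if a partial send was rewound its RWQE is reused, so
		 * only its SGE references need dropping; otherwise get
		 * a fresh RWQE.
		 */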
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			while (qp->s_rdma_read_sge.num_sge) {
				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
					   refcount);
				if (--qp->s_rdma_read_sge.num_sge)
					qp->s_rdma_read_sge.sge =
						*qp->s_rdma_read_sge.sg_list++;
			}
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
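	/*
	 * Advance the expected PSN and remember this opcode for the
	 * next packet's sequence check.
	 */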
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	return;

op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}