/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static void rc_timeout(unsigned long arg);

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	qib_skip_sge(ss, len, 0);
	return wqe->length - len;
}
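
/*
 * Example: restarting at psn == wqe->psn + 3 with a 2048-byte path MTU
 * skips 3 * 2048 = 6144 bytes of the SGE list and leaves
 * wqe->length - 6144 bytes still to be (re)sent.
 */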

static void start_timer(struct rvt_qp *qp)
{
	qp->s_flags |= RVT_S_TIMER;
	qp->s_timer.function = rc_timeout;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
	add_timer(&qp->s_timer);
}
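
/*
 * Note: qp->timeout_jiffies is precomputed from the IB timeout exponent,
 * i.e. roughly 4.096 usec * (1 << qp->timeout); e.g. a timeout exponent of
 * 14 works out to about 67 msec between retransmit attempts.
 */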

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
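 *
 * s_ack_queue is a small ring of QIB_MAX_RDMA_ATOMIC + 1 entries:
 * r_head_ack_queue is where the receive path queues incoming RDMA read
 * and atomic requests, and s_tail_ack_queue is the entry currently being
 * answered; when the two are equal, only a bare ACK/NAK
 * (RVT_S_ACK_PENDING) can still be outstanding.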
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct qib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = qib_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = qib_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = qib_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
					    (qp->s_nak_state <<
					     QIB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = qib_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	unsigned long flags;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * the receive interrupt handler, and timeout resends.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->atomic_wr.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->atomic_wr.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
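	/*
	 * delta is the sign-extended 24-bit PSN distance from the start of
	 * the WQE; requesting an ACK every QIB_PSN_CREDIT packets presumably
	 * keeps ACKs flowing on long messages instead of waiting for the
	 * last packet of the request.
	 */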
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct qib_ib_header hdr;
	struct qib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
					    (qp->r_nak_state <<
					     QIB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = qib_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * This is called from s_timer for missing responses.
 */
static void rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct qib_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
		qib_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void qib_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		del_timer(&qp->s_timer);
		qib_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
{
	struct qib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	struct ib_wc wc;
	unsigned i;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		start_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	struct ib_wc wc;
	unsigned i;

	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	} else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
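 *
 * The AETH is decoded as in the rest of this file: the top three bits
 * (aeth >> 29) select ACK (0), RNR NAK (1) or NAK (3), the field at
 * QIB_AETH_CREDIT_SHIFT carries the credit count or NAK code, and
 * QIB_MSN_MASK covers the 24-bit MSN.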
1141 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001142static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
Ralph Campbellf9315512010-05-23 21:44:54 -07001143 u64 val, struct qib_ctxtdata *rcd)
1144{
1145 struct qib_ibport *ibp;
1146 enum ib_wc_status status;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001147 struct rvt_swqe *wqe;
Ralph Campbellf9315512010-05-23 21:44:54 -07001148 int ret = 0;
1149 u32 ack_psn;
1150 int diff;
1151
1152 /* Remove QP from retry timer */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001153 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1154 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
Ralph Campbellf9315512010-05-23 21:44:54 -07001155 del_timer(&qp->s_timer);
1156 }
1157
1158 /*
1159 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1160 * requests and implicitly NAK RDMA read and atomic requests issued
1161 * before the NAK'ed request. The MSN won't include the NAK'ed
1162 * request but will include an ACK'ed request(s).
1163 */
1164 ack_psn = psn;
1165 if (aeth >> 29)
1166 ack_psn--;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001167 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001168 ibp = to_iport(qp->ibqp.device, qp->port_num);
1169
1170 /*
1171 * The MSN might be for a later WQE than the PSN indicates so
1172 * only complete WQEs that the PSN finishes.
1173 */
1174 while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
1175 /*
1176 * RDMA_READ_RESPONSE_ONLY is a special case since
1177 * we want to generate completion events for everything
1178 * before the RDMA read, copy the data, then generate
1179 * the completion for the read.
1180 */
1181 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1182 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1183 diff == 0) {
1184 ret = 1;
1185 goto bail;
1186 }
1187 /*
1188 * If this request is a RDMA read or atomic, and the ACK is
1189 * for a later operation, this ACK NAKs the RDMA read or
1190 * atomic. In other words, only a RDMA_READ_LAST or ONLY
1191 * can ACK a RDMA read and likewise for atomic ops. Note
1192 * that the NAK case can only happen if relaxed ordering is
1193 * used and requests are sent after an RDMA read or atomic
1194 * is sent but before the response is received.
1195 */
1196 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1197 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1198 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1199 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1200 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1201 /* Retry this request. */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001202 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1203 qp->r_flags |= RVT_R_RDMAR_SEQ;
Ralph Campbellf9315512010-05-23 21:44:54 -07001204 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1205 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001206 qp->r_flags |= RVT_R_RSP_SEND;
Ralph Campbellf9315512010-05-23 21:44:54 -07001207 atomic_inc(&qp->refcount);
1208 list_add_tail(&qp->rspwait,
1209 &rcd->qp_wait_list);
1210 }
1211 }
1212 /*
1213 * No need to process the ACK/NAK since we are
1214 * restarting an earlier request.
1215 */
1216 goto bail;
1217 }
1218 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1219 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1220 u64 *vaddr = wqe->sg_list[0].vaddr;
1221 *vaddr = val;
1222 }
1223 if (qp->s_num_rd_atomic &&
1224 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1225 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1226 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1227 qp->s_num_rd_atomic--;
1228 /* Restart sending task if fence is complete */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001229 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
Ralph Campbellf9315512010-05-23 21:44:54 -07001230 !qp->s_num_rd_atomic) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001231 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
1232 RVT_S_WAIT_ACK);
Ralph Campbellf9315512010-05-23 21:44:54 -07001233 qib_schedule_send(qp);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001234 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
1235 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
1236 RVT_S_WAIT_ACK);
Ralph Campbellf9315512010-05-23 21:44:54 -07001237 qib_schedule_send(qp);
1238 }
1239 }
1240 wqe = do_rc_completion(qp, wqe, ibp);
1241 if (qp->s_acked == qp->s_tail)
1242 break;
1243 }
1244
1245 switch (aeth >> 29) {
1246 case 0: /* ACK */
Harish Chegondif24a6d42016-01-22 12:56:02 -08001247 this_cpu_inc(*ibp->rvp.rc_acks);
Ralph Campbellf9315512010-05-23 21:44:54 -07001248 if (qp->s_acked != qp->s_tail) {
1249 /*
1250 * We are expecting more ACKs so
1251 * reset the retransmit timer.
1252 */
1253 start_timer(qp);
1254 /*
1255 * We can stop resending the earlier packets and
1256 * continue with the next packet the receiver wants.
1257 */
1258 if (qib_cmp24(qp->s_psn, psn) <= 0)
1259 reset_psn(qp, psn + 1);
1260 } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
1261 qp->s_state = OP(SEND_LAST);
1262 qp->s_psn = psn + 1;
1263 }
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001264 if (qp->s_flags & RVT_S_WAIT_ACK) {
1265 qp->s_flags &= ~RVT_S_WAIT_ACK;
Ralph Campbellf9315512010-05-23 21:44:54 -07001266 qib_schedule_send(qp);
1267 }
1268 qib_get_credit(qp, aeth);
1269 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1270 qp->s_retry = qp->s_retry_cnt;
1271 update_last_psn(qp, psn);
1272 ret = 1;
1273 goto bail;
1274
1275 case 1: /* RNR NAK */
Harish Chegondif24a6d42016-01-22 12:56:02 -08001276 ibp->rvp.n_rnr_naks++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001277 if (qp->s_acked == qp->s_tail)
1278 goto bail;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001279 if (qp->s_flags & RVT_S_WAIT_RNR)
Ralph Campbellf9315512010-05-23 21:44:54 -07001280 goto bail;
1281 if (qp->s_rnr_retry == 0) {
1282 status = IB_WC_RNR_RETRY_EXC_ERR;
1283 goto class_b;
1284 }
1285 if (qp->s_rnr_retry_cnt < 7)
1286 qp->s_rnr_retry--;
1287
1288 /* The last valid PSN is the previous PSN. */
1289 update_last_psn(qp, psn - 1);
1290
Harish Chegondif24a6d42016-01-22 12:56:02 -08001291 ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
Ralph Campbellf9315512010-05-23 21:44:54 -07001292
1293 reset_psn(qp, psn);
1294
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001295 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
1296 qp->s_flags |= RVT_S_WAIT_RNR;
Ralph Campbellf9315512010-05-23 21:44:54 -07001297 qp->s_timer.function = qib_rc_rnr_retry;
1298 qp->s_timer.expires = jiffies + usecs_to_jiffies(
1299 ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
1300 QIB_AETH_CREDIT_MASK]);
1301 add_timer(&qp->s_timer);
1302 goto bail;
1303
1304 case 3: /* NAK */
1305 if (qp->s_acked == qp->s_tail)
1306 goto bail;
1307 /* The last valid PSN is the previous PSN. */
1308 update_last_psn(qp, psn - 1);
1309 switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
1310 QIB_AETH_CREDIT_MASK) {
1311 case 0: /* PSN sequence error */
Harish Chegondif24a6d42016-01-22 12:56:02 -08001312 ibp->rvp.n_seq_naks++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001313 /*
1314 * Back up to the responder's expected PSN.
1315 * Note that we might get a NAK in the middle of an
1316 * RDMA READ response which terminates the RDMA
1317 * READ.
1318 */
1319 qib_restart_rc(qp, psn, 0);
1320 qib_schedule_send(qp);
1321 break;
1322
1323 case 1: /* Invalid Request */
1324 status = IB_WC_REM_INV_REQ_ERR;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001325 ibp->rvp.n_other_naks++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001326 goto class_b;
1327
1328 case 2: /* Remote Access Error */
1329 status = IB_WC_REM_ACCESS_ERR;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001330 ibp->rvp.n_other_naks++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001331 goto class_b;
1332
1333 case 3: /* Remote Operation Error */
1334 status = IB_WC_REM_OP_ERR;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001335 ibp->rvp.n_other_naks++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001336class_b:
1337 if (qp->s_last == qp->s_acked) {
1338 qib_send_complete(qp, wqe, status);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001339 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
Ralph Campbellf9315512010-05-23 21:44:54 -07001340 }
1341 break;
1342
1343 default:
1344 /* Ignore other reserved NAK error codes */
1345 goto reserved;
1346 }
1347 qp->s_retry = qp->s_retry_cnt;
1348 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1349 goto bail;
1350
1351 default: /* 2: reserved */
1352reserved:
1353 /* Ignore reserved NAK codes. */
1354 goto bail;
1355 }
1356
1357bail:
1358 return ret;
1359}
1360
1361/*
1362 * We have seen an out of sequence RDMA read middle or last packet.
1363 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1364 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001365static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
Ralph Campbellf9315512010-05-23 21:44:54 -07001366 struct qib_ctxtdata *rcd)
1367{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001368 struct rvt_swqe *wqe;
Ralph Campbellf9315512010-05-23 21:44:54 -07001369
1370 /* Remove QP from retry timer */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001371 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1372 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
Ralph Campbellf9315512010-05-23 21:44:54 -07001373 del_timer(&qp->s_timer);
1374 }
1375
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001376 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001377
1378 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1379 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1380 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1381 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1382 break;
1383 wqe = do_rc_completion(qp, wqe, ibp);
1384 }
1385
Harish Chegondif24a6d42016-01-22 12:56:02 -08001386 ibp->rvp.n_rdma_seq++;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001387 qp->r_flags |= RVT_R_RDMAR_SEQ;
Ralph Campbellf9315512010-05-23 21:44:54 -07001388 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1389 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001390 qp->r_flags |= RVT_R_RSP_SEND;
Ralph Campbellf9315512010-05-23 21:44:54 -07001391 atomic_inc(&qp->refcount);
1392 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1393 }
1394}
1395
1396/**
1397 * qib_rc_rcv_resp - process an incoming RC response packet
1398 * @ibp: the port this packet came in on
1399 * @ohdr: the other headers for this packet
1400 * @data: the packet data
1401 * @tlen: the packet length
1402 * @qp: the QP for this packet
1403 * @opcode: the opcode for this packet
1404 * @psn: the packet sequence number for this packet
1405 * @hdrsize: the header length
1406 * @pmtu: the path MTU
1407 *
1408 * This is called from qib_rc_rcv() to process an incoming RC response
1409 * packet for the given QP.
1410 * Called at interrupt level.
1411 */
1412static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1413 struct qib_other_headers *ohdr,
1414 void *data, u32 tlen,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001415 struct rvt_qp *qp,
Ralph Campbellf9315512010-05-23 21:44:54 -07001416 u32 opcode,
1417 u32 psn, u32 hdrsize, u32 pmtu,
1418 struct qib_ctxtdata *rcd)
1419{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001420 struct rvt_swqe *wqe;
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001421 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001422 enum ib_wc_status status;
1423 unsigned long flags;
1424 int diff;
1425 u32 pad;
1426 u32 aeth;
1427 u64 val;
1428
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001429 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1430 /*
1431 * If ACK'd PSN on SDMA busy list try to make progress to
1432 * reclaim SDMA credits.
1433 */
1434 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1435 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1436
1437 /*
1438 * If send tasklet not running attempt to progress
1439 * SDMA queue.
1440 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001441 if (!(qp->s_flags & RVT_S_BUSY)) {
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001442 /* Acquire SDMA Lock */
1443 spin_lock_irqsave(&ppd->sdma_lock, flags);
1444 /* Invoke sdma make progress */
1445 qib_sdma_make_progress(ppd);
1446 /* Release SDMA Lock */
1447 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1448 }
1449 }
1450 }
1451
Ralph Campbellf9315512010-05-23 21:44:54 -07001452 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001453 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
Mike Marciniszyn414ed902011-02-10 14:11:28 +00001454 goto ack_done;
Ralph Campbellf9315512010-05-23 21:44:54 -07001455
Ralph Campbellf9315512010-05-23 21:44:54 -07001456 /* Ignore invalid responses. */
1457 if (qib_cmp24(psn, qp->s_next_psn) >= 0)
1458 goto ack_done;
1459
1460 /* Ignore duplicate responses. */
1461 diff = qib_cmp24(psn, qp->s_last_psn);
1462 if (unlikely(diff <= 0)) {
1463 /* Update credits for "ghost" ACKs */
1464 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1465 aeth = be32_to_cpu(ohdr->u.aeth);
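 /*
 * The top three bits of the AETH distinguish ACK from NAK and
 * RNR NAK; zero means a plain ACK, in which case the remaining
 * syndrome bits carry the flow-control credit count.
 */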
1466 if ((aeth >> 29) == 0)
1467 qib_get_credit(qp, aeth);
1468 }
1469 goto ack_done;
1470 }
1471
1472 /*
1473 * Skip everything other than the PSN we expect, if we are waiting
1474 * for a reply to a restarted RDMA read or atomic op.
1475 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001476 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
Ralph Campbellf9315512010-05-23 21:44:54 -07001477 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1478 goto ack_done;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001479 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
Ralph Campbellf9315512010-05-23 21:44:54 -07001480 }
1481
1482 if (unlikely(qp->s_acked == qp->s_tail))
1483 goto ack_done;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001484 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001485 status = IB_WC_SUCCESS;
1486
1487 switch (opcode) {
1488 case OP(ACKNOWLEDGE):
1489 case OP(ATOMIC_ACKNOWLEDGE):
1490 case OP(RDMA_READ_RESPONSE_FIRST):
1491 aeth = be32_to_cpu(ohdr->u.aeth);
1492 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1493 __be32 *p = ohdr->u.at.atomic_ack_eth;
1494
1495 val = ((u64) be32_to_cpu(p[0]) << 32) |
1496 be32_to_cpu(p[1]);
1497 } else
1498 val = 0;
1499 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1500 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1501 goto ack_done;
1502 hdrsize += 4;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001503 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001504 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1505 goto ack_op_err;
1506 /*
1507 * If this is a response to a resent RDMA read, we
1508 * have to be careful to copy the data to the right
1509 * location.
1510 */
1511 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1512 wqe, psn, pmtu);
1513 goto read_middle;
1514
1515 case OP(RDMA_READ_RESPONSE_MIDDLE):
1516 /* no AETH, no ACK */
1517 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1518 goto ack_seq_err;
1519 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1520 goto ack_op_err;
1521read_middle:
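 /*
 * A MIDDLE response carries exactly one MTU of payload (plus the
 * 4-byte ICRC) and cannot be the final packet, so more than one
 * MTU of the read must still be outstanding.
 */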
1522 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1523 goto ack_len_err;
1524 if (unlikely(pmtu >= qp->s_rdma_read_len))
1525 goto ack_len_err;
1526
1527 /*
1528 * We got a response so update the timeout.
1529 * 4.096 usec. * (1 << qp->timeout)
1530 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001531 qp->s_flags |= RVT_S_TIMER;
Mike Marciniszynd0f2faf2011-09-23 13:16:49 -04001532 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001533 if (qp->s_flags & RVT_S_WAIT_ACK) {
1534 qp->s_flags &= ~RVT_S_WAIT_ACK;
Ralph Campbellf9315512010-05-23 21:44:54 -07001535 qib_schedule_send(qp);
1536 }
1537
1538 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1539 qp->s_retry = qp->s_retry_cnt;
1540
1541 /*
1542 * Update the RDMA receive state but do the copy w/o
1543 * holding the locks and blocking interrupts.
1544 */
1545 qp->s_rdma_read_len -= pmtu;
1546 update_last_psn(qp, psn);
1547 spin_unlock_irqrestore(&qp->s_lock, flags);
1548 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1549 goto bail;
1550
1551 case OP(RDMA_READ_RESPONSE_ONLY):
1552 aeth = be32_to_cpu(ohdr->u.aeth);
1553 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1554 goto ack_done;
1555 /* Get the number of bytes the message was padded by. */
1556 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1557 /*
1558 * Check that the data size is >= 0 && <= pmtu.
1559 * Remember to account for the AETH header (4) and
1560 * ICRC (4).
1561 */
1562 if (unlikely(tlen < (hdrsize + pad + 8)))
1563 goto ack_len_err;
1564 /*
1565 * If this is a response to a resent RDMA read, we
1566 * have to be careful to copy the data to the right
1567 * location.
1568 */
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001569 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001570 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1571 wqe, psn, pmtu);
1572 goto read_last;
1573
1574 case OP(RDMA_READ_RESPONSE_LAST):
1575 /* ACKs READ req. */
1576 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1577 goto ack_seq_err;
1578 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1579 goto ack_op_err;
1580 /* Get the number of bytes the message was padded by. */
1581 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1582 /*
1583 * Check that the data size is >= 1 && <= pmtu.
1584 * Remember to account for the AETH header (4) and
1585 * ICRC (4).
1586 */
1587 if (unlikely(tlen <= (hdrsize + pad + 8)))
1588 goto ack_len_err;
1589read_last:
1590 tlen -= hdrsize + pad + 8;
1591 if (unlikely(tlen != qp->s_rdma_read_len))
1592 goto ack_len_err;
1593 aeth = be32_to_cpu(ohdr->u.aeth);
1594 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1595 WARN_ON(qp->s_rdma_read_sge.num_sge);
1596 (void) do_rc_ack(qp, aeth, psn,
1597 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1598 goto ack_done;
1599 }
1600
1601ack_op_err:
1602 status = IB_WC_LOC_QP_OP_ERR;
1603 goto ack_err;
1604
1605ack_seq_err:
1606 rdma_seq_err(qp, ibp, psn, rcd);
1607 goto ack_done;
1608
1609ack_len_err:
1610 status = IB_WC_LOC_LEN_ERR;
1611ack_err:
1612 if (qp->s_last == qp->s_acked) {
1613 qib_send_complete(qp, wqe, status);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001614 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
Ralph Campbellf9315512010-05-23 21:44:54 -07001615 }
1616ack_done:
1617 spin_unlock_irqrestore(&qp->s_lock, flags);
1618bail:
1619 return;
1620}
1621
1622/**
1623 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1624 * @ohdr: the other headers for this packet
1625 * @data: the packet data
1626 * @qp: the QP for this packet
1627 * @opcode: the opcode for this packet
1628 * @psn: the packet sequence number for this packet
1629 * @diff: the difference between the PSN and the expected PSN
1630 *
1631 * This is called from qib_rc_rcv() to process an unexpected
1632 * incoming RC packet for the given QP.
1633 * Called at interrupt level.
1634 * Return 1 if no more processing is needed; otherwise return 0 to
1635 * schedule a response to be sent.
1636 */
1637static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
1638 void *data,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001639 struct rvt_qp *qp,
Ralph Campbellf9315512010-05-23 21:44:54 -07001640 u32 opcode,
1641 u32 psn,
1642 int diff,
1643 struct qib_ctxtdata *rcd)
1644{
1645 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001646 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07001647 unsigned long flags;
1648 u8 i, prev;
1649 int old_req;
1650
1651 if (diff > 0) {
1652 /*
1653 * Packet sequence error.
1654 * A NAK will ACK earlier sends and RDMA writes.
1655 * Don't queue the NAK if we already sent one.
1656 */
1657 if (!qp->r_nak_state) {
Harish Chegondif24a6d42016-01-22 12:56:02 -08001658 ibp->rvp.n_rc_seqnak++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001659 qp->r_nak_state = IB_NAK_PSN_ERROR;
1660 /* Use the expected PSN. */
1661 qp->r_ack_psn = qp->r_psn;
1662 /*
1663 * Wait to send the sequence NAK until all packets
1664 * in the receive queue have been processed.
1665 * Otherwise, we end up propagating congestion.
1666 */
1667 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001668 qp->r_flags |= RVT_R_RSP_NAK;
Ralph Campbellf9315512010-05-23 21:44:54 -07001669 atomic_inc(&qp->refcount);
1670 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1671 }
1672 }
1673 goto done;
1674 }
1675
1676 /*
1677 * Handle a duplicate request. Don't re-execute SEND, RDMA
1678 * write or atomic op. Don't NAK errors, just silently drop
1679 * the duplicate request. Note that r_sge, r_len, and
1680 * r_rcv_len may be in use so don't modify them.
1681 *
1682 * We are supposed to ACK the earliest duplicate PSN but we
1683 * can coalesce an outstanding duplicate ACK. We have to
1684 * send the earliest so that RDMA reads can be restarted at
1685 * the requester's expected PSN.
1686 *
1687 * First, find where this duplicate PSN falls within the
1688 * ACKs previously sent.
1689 * old_req is true if there is an older response that is scheduled
1690 * to be sent before sending this one.
1691 */
1692 e = NULL;
1693 old_req = 1;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001694 ibp->rvp.n_rc_dupreq++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001695
1696 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001697
1698 for (i = qp->r_head_ack_queue; ; i = prev) {
1699 if (i == qp->s_tail_ack_queue)
1700 old_req = 0;
1701 if (i)
1702 prev = i - 1;
1703 else
1704 prev = QIB_MAX_RDMA_ATOMIC;
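 /* Wrap backward: s_ack_queue is a ring of QIB_MAX_RDMA_ATOMIC + 1 entries. */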
1705 if (prev == qp->r_head_ack_queue) {
1706 e = NULL;
1707 break;
1708 }
1709 e = &qp->s_ack_queue[prev];
1710 if (!e->opcode) {
1711 e = NULL;
1712 break;
1713 }
1714 if (qib_cmp24(psn, e->psn) >= 0) {
1715 if (prev == qp->s_tail_ack_queue &&
1716 qib_cmp24(psn, e->lpsn) <= 0)
1717 old_req = 0;
1718 break;
1719 }
1720 }
1721 switch (opcode) {
1722 case OP(RDMA_READ_REQUEST): {
1723 struct ib_reth *reth;
1724 u32 offset;
1725 u32 len;
1726
1727 /*
1728 * If we didn't find the RDMA read request in the ack queue,
1729 * we can ignore this request.
1730 */
1731 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1732 goto unlock_done;
1733 /* RETH comes after BTH */
1734 reth = &ohdr->u.rc.reth;
1735 /*
1736 * Address range must be a subset of the original
1737 * request and start on pmtu boundaries.
1738 * We reuse the old ack_queue slot since the requester
1739 * should not back up and request an earlier PSN for the
1740 * same request.
1741 */
1742 offset = ((psn - e->psn) & QIB_PSN_MASK) *
Mike Marciniszyncc6ea132011-09-23 13:16:34 -04001743 qp->pmtu;
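 /*
 * Each read response packet covers one path MTU of data, so the
 * 24-bit PSN delta maps directly to a byte offset; e.g. with a
 * 2048-byte MTU, a duplicate PSN three past the original start
 * corresponds to offset 3 * 2048 = 6144 into the read.
 */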
Ralph Campbellf9315512010-05-23 21:44:54 -07001744 len = be32_to_cpu(reth->length);
1745 if (unlikely(offset + len != e->rdma_sge.sge_length))
1746 goto unlock_done;
1747 if (e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001748 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07001749 e->rdma_sge.mr = NULL;
1750 }
1751 if (len != 0) {
1752 u32 rkey = be32_to_cpu(reth->rkey);
1753 u64 vaddr = be64_to_cpu(reth->vaddr);
1754 int ok;
1755
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001756 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
Ralph Campbellf9315512010-05-23 21:44:54 -07001757 IB_ACCESS_REMOTE_READ);
1758 if (unlikely(!ok))
1759 goto unlock_done;
1760 } else {
1761 e->rdma_sge.vaddr = NULL;
1762 e->rdma_sge.length = 0;
1763 e->rdma_sge.sge_length = 0;
1764 }
1765 e->psn = psn;
1766 if (old_req)
1767 goto unlock_done;
1768 qp->s_tail_ack_queue = prev;
1769 break;
1770 }
1771
1772 case OP(COMPARE_SWAP):
1773 case OP(FETCH_ADD): {
1774 /*
1775 * If we didn't find the atomic request in the ack queue
1776 * or if the send tasklet is already backed up to send an
1777 * earlier entry, we can ignore this request.
1778 */
1779 if (!e || e->opcode != (u8) opcode || old_req)
1780 goto unlock_done;
1781 qp->s_tail_ack_queue = prev;
1782 break;
1783 }
1784
1785 default:
1786 /*
1787 * Ignore this operation if it doesn't request an ACK
1788 * or if an earlier RDMA read or atomic is going to be resent.
1789 */
1790 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1791 goto unlock_done;
1792 /*
1793 * Resend the most recent ACK if this request is
1794 * after all the previous RDMA reads and atomics.
1795 */
1796 if (i == qp->r_head_ack_queue) {
1797 spin_unlock_irqrestore(&qp->s_lock, flags);
1798 qp->r_nak_state = 0;
1799 qp->r_ack_psn = qp->r_psn - 1;
1800 goto send_ack;
1801 }
1802 /*
1803 * Try to send a simple ACK to work around a Mellanox bug
1804 * which doesn't accept an RDMA read response or atomic
1805 * response as an ACK for earlier SENDs or RDMA writes.
1806 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001807 if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
Ralph Campbellf9315512010-05-23 21:44:54 -07001808 spin_unlock_irqrestore(&qp->s_lock, flags);
1809 qp->r_nak_state = 0;
1810 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1811 goto send_ack;
1812 }
1813 /*
1814 * Resend the RDMA read or atomic op which
1815 * ACKs this duplicate request.
1816 */
1817 qp->s_tail_ack_queue = i;
1818 break;
1819 }
1820 qp->s_ack_state = OP(ACKNOWLEDGE);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001821 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07001822 qp->r_nak_state = 0;
1823 qib_schedule_send(qp);
1824
1825unlock_done:
1826 spin_unlock_irqrestore(&qp->s_lock, flags);
1827done:
1828 return 1;
1829
1830send_ack:
1831 return 0;
1832}
1833
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001834void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
Ralph Campbellf9315512010-05-23 21:44:54 -07001835{
1836 unsigned long flags;
1837 int lastwqe;
1838
1839 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001840 lastwqe = rvt_error_qp(qp, err);
Ralph Campbellf9315512010-05-23 21:44:54 -07001841 spin_unlock_irqrestore(&qp->s_lock, flags);
1842
1843 if (lastwqe) {
1844 struct ib_event ev;
1845
1846 ev.device = qp->ibqp.device;
1847 ev.element.qp = &qp->ibqp;
1848 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1849 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1850 }
1851}
1852
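/*
 * Advance s_tail_ack_queue past entry n and reset the ACK state
 * machine to idle.  The ack queue is a ring of QIB_MAX_RDMA_ATOMIC + 1
 * entries, so the index wraps back to zero after the last slot.
 */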
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001853static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
Ralph Campbellf9315512010-05-23 21:44:54 -07001854{
1855 unsigned next;
1856
1857 next = n + 1;
1858 if (next > QIB_MAX_RDMA_ATOMIC)
1859 next = 0;
1860 qp->s_tail_ack_queue = next;
1861 qp->s_ack_state = OP(ACKNOWLEDGE);
1862}
1863
1864/**
1865 * qib_rc_rcv - process an incoming RC packet
1866 * @rcd: the context pointer
1867 * @hdr: the header of this packet
1868 * @has_grh: true if the header has a GRH
1869 * @data: the packet data
1870 * @tlen: the packet length
1871 * @qp: the QP for this packet
1872 *
1873 * This is called from qib_qp_rcv() to process an incoming RC packet
1874 * for the given QP.
1875 * Called at interrupt level.
1876 */
1877void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001878 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -07001879{
1880 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1881 struct qib_other_headers *ohdr;
1882 u32 opcode;
1883 u32 hdrsize;
1884 u32 psn;
1885 u32 pad;
1886 struct ib_wc wc;
Mike Marciniszyncc6ea132011-09-23 13:16:34 -04001887 u32 pmtu = qp->pmtu;
Ralph Campbellf9315512010-05-23 21:44:54 -07001888 int diff;
1889 struct ib_reth *reth;
1890 unsigned long flags;
1891 int ret;
1892
1893 /* Check for GRH */
1894 if (!has_grh) {
1895 ohdr = &hdr->u.oth;
1896 hdrsize = 8 + 12; /* LRH + BTH */
1897 } else {
1898 ohdr = &hdr->u.l.oth;
1899 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1900 }
1901
1902 opcode = be32_to_cpu(ohdr->bth[0]);
Ralph Campbellf9315512010-05-23 21:44:54 -07001903 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
Mike Marciniszyn9fd54732011-09-23 13:17:00 -04001904 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001905
1906 psn = be32_to_cpu(ohdr->bth[2]);
1907 opcode >>= 24;
1908
Ralph Campbellf9315512010-05-23 21:44:54 -07001909 /*
1910 * Process responses (ACKs) before anything else. Note that the
1911 * packet sequence number will be for something in the send work
1912 * queue rather than the expected receive packet sequence number.
1913 * In other words, this QP is the requester.
1914 */
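 /*
 * The RC response opcodes are contiguous in the IBA opcode
 * encoding, so a simple range check identifies them.
 */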
1915 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1916 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1917 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1918 hdrsize, pmtu, rcd);
Ralph Campbella5210c12010-08-02 22:39:30 +00001919 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001920 }
1921
1922 /* Compute 24 bits worth of difference. */
1923 diff = qib_cmp24(psn, qp->r_psn);
1924 if (unlikely(diff)) {
1925 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
Ralph Campbella5210c12010-08-02 22:39:30 +00001926 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001927 goto send_ack;
1928 }
1929
1930 /* Check for opcode sequence errors. */
1931 switch (qp->r_state) {
1932 case OP(SEND_FIRST):
1933 case OP(SEND_MIDDLE):
1934 if (opcode == OP(SEND_MIDDLE) ||
1935 opcode == OP(SEND_LAST) ||
1936 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1937 break;
1938 goto nack_inv;
1939
1940 case OP(RDMA_WRITE_FIRST):
1941 case OP(RDMA_WRITE_MIDDLE):
1942 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1943 opcode == OP(RDMA_WRITE_LAST) ||
1944 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1945 break;
1946 goto nack_inv;
1947
1948 default:
1949 if (opcode == OP(SEND_MIDDLE) ||
1950 opcode == OP(SEND_LAST) ||
1951 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1952 opcode == OP(RDMA_WRITE_MIDDLE) ||
1953 opcode == OP(RDMA_WRITE_LAST) ||
1954 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1955 goto nack_inv;
1956 /*
1957 * Note that it is up to the requester to not send a new
1958 * RDMA read or atomic operation before receiving an ACK
1959 * for the previous operation.
1960 */
1961 break;
1962 }
1963
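 /*
 * The first request received while the QP is still in RTR
 * establishes communication; report IB_EVENT_COMM_EST so the
 * consumer can cancel any connection-establishment timeout.
 */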
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001964 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
1965 qp->r_flags |= RVT_R_COMM_EST;
Ralph Campbellf9315512010-05-23 21:44:54 -07001966 if (qp->ibqp.event_handler) {
1967 struct ib_event ev;
1968
1969 ev.device = qp->ibqp.device;
1970 ev.element.qp = &qp->ibqp;
1971 ev.event = IB_EVENT_COMM_EST;
1972 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1973 }
1974 }
1975
1976 /* OK, process the packet. */
1977 switch (opcode) {
1978 case OP(SEND_FIRST):
1979 ret = qib_get_rwqe(qp, 0);
1980 if (ret < 0)
1981 goto nack_op_err;
1982 if (!ret)
1983 goto rnr_nak;
1984 qp->r_rcv_len = 0;
1985 /* FALLTHROUGH */
1986 case OP(SEND_MIDDLE):
1987 case OP(RDMA_WRITE_MIDDLE):
1988send_middle:
1989 /* Check for invalid length PMTU or posted rwqe len. */
1990 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1991 goto nack_inv;
1992 qp->r_rcv_len += pmtu;
1993 if (unlikely(qp->r_rcv_len > qp->r_len))
1994 goto nack_inv;
1995 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1996 break;
1997
1998 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1999 /* consume RWQE */
2000 ret = qib_get_rwqe(qp, 1);
2001 if (ret < 0)
2002 goto nack_op_err;
2003 if (!ret)
2004 goto rnr_nak;
2005 goto send_last_imm;
2006
2007 case OP(SEND_ONLY):
2008 case OP(SEND_ONLY_WITH_IMMEDIATE):
2009 ret = qib_get_rwqe(qp, 0);
2010 if (ret < 0)
2011 goto nack_op_err;
2012 if (!ret)
2013 goto rnr_nak;
2014 qp->r_rcv_len = 0;
2015 if (opcode == OP(SEND_ONLY))
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002016 goto no_immediate_data;
2017 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
Ralph Campbellf9315512010-05-23 21:44:54 -07002018 case OP(SEND_LAST_WITH_IMMEDIATE):
2019send_last_imm:
2020 wc.ex.imm_data = ohdr->u.imm_data;
2021 hdrsize += 4;
2022 wc.wc_flags = IB_WC_WITH_IMM;
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002023 goto send_last;
Ralph Campbellf9315512010-05-23 21:44:54 -07002024 case OP(SEND_LAST):
2025 case OP(RDMA_WRITE_LAST):
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002026no_immediate_data:
2027 wc.wc_flags = 0;
2028 wc.ex.imm_data = 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07002029send_last:
2030 /* Get the number of bytes the message was padded by. */
2031 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
2032 /* Check for invalid length. */
2033 /* XXX LAST len should be >= 1 */
2034 if (unlikely(tlen < (hdrsize + pad + 4)))
2035 goto nack_inv;
2036 /* Don't count the CRC. */
2037 tlen -= (hdrsize + pad + 4);
2038 wc.byte_len = tlen + qp->r_rcv_len;
2039 if (unlikely(wc.byte_len > qp->r_len))
2040 goto nack_inv;
2041 qib_copy_sge(&qp->r_sge, data, tlen, 1);
Harish Chegondi70696ea2016-02-03 14:20:27 -08002042 rvt_put_ss(&qp->r_sge);
Ralph Campbellf9315512010-05-23 21:44:54 -07002043 qp->r_msn++;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002044 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
Ralph Campbellf9315512010-05-23 21:44:54 -07002045 break;
2046 wc.wr_id = qp->r_wr_id;
2047 wc.status = IB_WC_SUCCESS;
2048 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2049 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2050 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2051 else
2052 wc.opcode = IB_WC_RECV;
2053 wc.qp = &qp->ibqp;
2054 wc.src_qp = qp->remote_qpn;
2055 wc.slid = qp->remote_ah_attr.dlid;
2056 wc.sl = qp->remote_ah_attr.sl;
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002057 /* zero fields that are N/A */
2058 wc.vendor_err = 0;
2059 wc.pkey_index = 0;
2060 wc.dlid_path_bits = 0;
2061 wc.port_num = 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07002062 /* Signal completion event if the solicited bit is set. */
Harish Chegondi4bb88e52016-01-22 13:07:36 -08002063 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
Ralph Campbellf9315512010-05-23 21:44:54 -07002064 (ohdr->bth[0] &
2065 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
2066 break;
2067
2068 case OP(RDMA_WRITE_FIRST):
2069 case OP(RDMA_WRITE_ONLY):
2070 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2071 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2072 goto nack_inv;
2073 /* consume RWQE */
2074 reth = &ohdr->u.rc.reth;
2075 hdrsize += sizeof(*reth);
2076 qp->r_len = be32_to_cpu(reth->length);
2077 qp->r_rcv_len = 0;
2078 qp->r_sge.sg_list = NULL;
2079 if (qp->r_len != 0) {
2080 u32 rkey = be32_to_cpu(reth->rkey);
2081 u64 vaddr = be64_to_cpu(reth->vaddr);
2082 int ok;
2083
2084 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002085 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
Ralph Campbellf9315512010-05-23 21:44:54 -07002086 rkey, IB_ACCESS_REMOTE_WRITE);
2087 if (unlikely(!ok))
2088 goto nack_acc;
2089 qp->r_sge.num_sge = 1;
2090 } else {
2091 qp->r_sge.num_sge = 0;
2092 qp->r_sge.sge.mr = NULL;
2093 qp->r_sge.sge.vaddr = NULL;
2094 qp->r_sge.sge.length = 0;
2095 qp->r_sge.sge.sge_length = 0;
2096 }
2097 if (opcode == OP(RDMA_WRITE_FIRST))
2098 goto send_middle;
2099 else if (opcode == OP(RDMA_WRITE_ONLY))
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002100 goto no_immediate_data;
Ralph Campbellf9315512010-05-23 21:44:54 -07002101 ret = qib_get_rwqe(qp, 1);
2102 if (ret < 0)
2103 goto nack_op_err;
2104 if (!ret)
2105 goto rnr_nak;
Jason Gunthorpe5715f5d2010-10-22 22:00:48 +00002106 wc.ex.imm_data = ohdr->u.rc.imm_data;
2107 hdrsize += 4;
2108 wc.wc_flags = IB_WC_WITH_IMM;
2109 goto send_last;
Ralph Campbellf9315512010-05-23 21:44:54 -07002110
2111 case OP(RDMA_READ_REQUEST): {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002112 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07002113 u32 len;
2114 u8 next;
2115
2116 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2117 goto nack_inv;
2118 next = qp->r_head_ack_queue + 1;
2119 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
2120 if (next > QIB_MAX_RDMA_ATOMIC)
2121 next = 0;
2122 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07002123 if (unlikely(next == qp->s_tail_ack_queue)) {
2124 if (!qp->s_ack_queue[next].sent)
2125 goto nack_inv_unlck;
2126 qib_update_ack_queue(qp, next);
2127 }
2128 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2129 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002130 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002131 e->rdma_sge.mr = NULL;
2132 }
2133 reth = &ohdr->u.rc.reth;
2134 len = be32_to_cpu(reth->length);
2135 if (len) {
2136 u32 rkey = be32_to_cpu(reth->rkey);
2137 u64 vaddr = be64_to_cpu(reth->vaddr);
2138 int ok;
2139
2140 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002141 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
Ralph Campbellf9315512010-05-23 21:44:54 -07002142 rkey, IB_ACCESS_REMOTE_READ);
2143 if (unlikely(!ok))
2144 goto nack_acc_unlck;
2145 /*
2146 * Update the next expected PSN. We add 1 later
2147 * below, so only add the remainder here.
2148 */
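 /*
 * E.g. a 4096-byte read with a 2048-byte path MTU generates two
 * response packets: r_psn advances by one here and once more when
 * the request is accepted below.
 */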
2149 if (len > pmtu)
2150 qp->r_psn += (len - 1) / pmtu;
2151 } else {
2152 e->rdma_sge.mr = NULL;
2153 e->rdma_sge.vaddr = NULL;
2154 e->rdma_sge.length = 0;
2155 e->rdma_sge.sge_length = 0;
2156 }
2157 e->opcode = opcode;
2158 e->sent = 0;
2159 e->psn = psn;
2160 e->lpsn = qp->r_psn;
2161 /*
2162 * We need to increment the MSN here instead of when we
2163 * finish sending the result since a duplicate request would
2164 * increment it more than once.
2165 */
2166 qp->r_msn++;
2167 qp->r_psn++;
2168 qp->r_state = opcode;
2169 qp->r_nak_state = 0;
2170 qp->r_head_ack_queue = next;
2171
2172 /* Schedule the send tasklet. */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002173 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07002174 qib_schedule_send(qp);
2175
Ralph Campbella5210c12010-08-02 22:39:30 +00002176 goto sunlock;
Ralph Campbellf9315512010-05-23 21:44:54 -07002177 }
2178
2179 case OP(COMPARE_SWAP):
2180 case OP(FETCH_ADD): {
2181 struct ib_atomic_eth *ateth;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002182 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07002183 u64 vaddr;
2184 atomic64_t *maddr;
2185 u64 sdata;
2186 u32 rkey;
2187 u8 next;
2188
2189 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2190 goto nack_inv;
2191 next = qp->r_head_ack_queue + 1;
2192 if (next > QIB_MAX_RDMA_ATOMIC)
2193 next = 0;
2194 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07002195 if (unlikely(next == qp->s_tail_ack_queue)) {
2196 if (!qp->s_ack_queue[next].sent)
2197 goto nack_inv_unlck;
2198 qib_update_ack_queue(qp, next);
2199 }
2200 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2201 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002202 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002203 e->rdma_sge.mr = NULL;
2204 }
2205 ateth = &ohdr->u.atomic_eth;
2206 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
2207 be32_to_cpu(ateth->vaddr[1]);
2208 if (unlikely(vaddr & (sizeof(u64) - 1)))
2209 goto nack_inv_unlck;
2210 rkey = be32_to_cpu(ateth->rkey);
2211 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002212 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
Ralph Campbellf9315512010-05-23 21:44:54 -07002213 vaddr, rkey,
2214 IB_ACCESS_REMOTE_ATOMIC)))
2215 goto nack_acc_unlck;
2216 /* Perform atomic OP and save result. */
2217 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2218 sdata = be64_to_cpu(ateth->swap_data);
2219 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2220 (u64) atomic64_add_return(sdata, maddr) - sdata :
2221 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2222 be64_to_cpu(ateth->compare_data),
2223 sdata);
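 /*
 * For FETCH_ADD, atomic64_add_return() yields the post-add value,
 * so subtracting sdata recovers the original contents to return to
 * the requester; cmpxchg() already returns the prior contents
 * whether or not the swap took place.
 */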
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002224 rvt_put_mr(qp->r_sge.sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002225 qp->r_sge.num_sge = 0;
2226 e->opcode = opcode;
2227 e->sent = 0;
2228 e->psn = psn;
2229 e->lpsn = psn;
2230 qp->r_msn++;
2231 qp->r_psn++;
2232 qp->r_state = opcode;
2233 qp->r_nak_state = 0;
2234 qp->r_head_ack_queue = next;
2235
2236 /* Schedule the send tasklet. */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002237 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07002238 qib_schedule_send(qp);
2239
Ralph Campbella5210c12010-08-02 22:39:30 +00002240 goto sunlock;
Ralph Campbellf9315512010-05-23 21:44:54 -07002241 }
2242
2243 default:
2244 /* NAK unknown opcodes. */
2245 goto nack_inv;
2246 }
2247 qp->r_psn++;
2248 qp->r_state = opcode;
2249 qp->r_ack_psn = psn;
2250 qp->r_nak_state = 0;
2251 /* Send an ACK if requested or required. */
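 /*
 * Bit 31 of BTH dword 2 is the AckReq bit (the low 24 bits hold
 * the PSN), so this tests whether the sender asked for an ACK.
 */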
2252 if (psn & (1 << 31))
2253 goto send_ack;
Ralph Campbella5210c12010-08-02 22:39:30 +00002254 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002255
2256rnr_nak:
2257 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2258 qp->r_ack_psn = qp->r_psn;
2259 /* Queue RNR NAK for later */
2260 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002261 qp->r_flags |= RVT_R_RSP_NAK;
Ralph Campbellf9315512010-05-23 21:44:54 -07002262 atomic_inc(&qp->refcount);
2263 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2264 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002265 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002266
2267nack_op_err:
2268 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2269 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2270 qp->r_ack_psn = qp->r_psn;
2271 /* Queue NAK for later */
2272 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002273 qp->r_flags |= RVT_R_RSP_NAK;
Ralph Campbellf9315512010-05-23 21:44:54 -07002274 atomic_inc(&qp->refcount);
2275 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2276 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002277 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002278
2279nack_inv_unlck:
2280 spin_unlock_irqrestore(&qp->s_lock, flags);
2281nack_inv:
2282 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2283 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2284 qp->r_ack_psn = qp->r_psn;
2285 /* Queue NAK for later */
2286 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002287 qp->r_flags |= RVT_R_RSP_NAK;
Ralph Campbellf9315512010-05-23 21:44:54 -07002288 atomic_inc(&qp->refcount);
2289 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2290 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002291 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002292
2293nack_acc_unlck:
2294 spin_unlock_irqrestore(&qp->s_lock, flags);
2295nack_acc:
2296 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2297 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2298 qp->r_ack_psn = qp->r_psn;
2299send_ack:
2300 qib_send_rc_ack(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07002301 return;
2302
2303sunlock:
2304 spin_unlock_irqrestore(&qp->s_lock, flags);
2305}