/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static void rc_timeout(unsigned long arg);

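/*
 * restart_sge - rewind an SGE state to resume a WQE at a given PSN
 *
 * Points ss back into the WQE's scatter/gather list, skips the bytes
 * already covered by packets before the given PSN (one pmtu per packet),
 * and returns the number of bytes still to be sent.
 */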
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	qib_skip_sge(ss, len, 0);
	return wqe->length - len;
}

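/*
 * start_timer - arm the retransmit timer for missing RC responses
 *
 * rc_timeout() fires if no response arrives within qp->timeout_jiffies.
 */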
static void start_timer(struct rvt_qp *qp)
{
	qp->s_flags |= RVT_S_TIMER;
	qp->s_timer.function = rc_timeout;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
	add_timer(&qp->s_timer);
}

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = qib_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = qib_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = qib_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
					    (qp->s_nak_state <<
					     QIB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = qib_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
					    (qp->r_nak_state <<
					     QIB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = qib_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * This is called from s_timer for missing responses.
 */
static void rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct qib_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
		qib_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void qib_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		del_timer(&qp->s_timer);
		qib_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	unsigned i;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		start_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	unsigned i;

	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/* Remove QP from retry timer */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * reset the retransmit timer.
			 */
			start_timer(qp);
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else if (qib_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		qib_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		ret = 1;
		goto bail;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		qp->s_flags |= RVT_S_WAIT_RNR;
		qp->s_timer.function = qib_rc_rnr_retry;
		qp->s_timer.expires = jiffies + usecs_to_jiffies(
			ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
					 QIB_AETH_CREDIT_MASK]);
		add_timer(&qp->s_timer);
		goto bail;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
			QIB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				qib_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

1342/**
1343 * qib_rc_rcv_resp - process an incoming RC response packet
1344 * @ibp: the port this packet came in on
1345 * @ohdr: the other headers for this packet
1346 * @data: the packet data
1347 * @tlen: the packet length
1348 * @qp: the QP for this packet
1349 * @opcode: the opcode for this packet
1350 * @psn: the packet sequence number for this packet
1351 * @hdrsize: the header length
1352 * @pmtu: the path MTU
1353 *
1354 * This is called from qib_rc_rcv() to process an incoming RC response
1355 * packet for the given QP.
1356 * Called at interrupt level.
1357 */
1358static void qib_rc_rcv_resp(struct qib_ibport *ibp,
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001359 struct ib_other_headers *ohdr,
Ralph Campbellf9315512010-05-23 21:44:54 -07001360 void *data, u32 tlen,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001361 struct rvt_qp *qp,
Ralph Campbellf9315512010-05-23 21:44:54 -07001362 u32 opcode,
1363 u32 psn, u32 hdrsize, u32 pmtu,
1364 struct qib_ctxtdata *rcd)
1365{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001366 struct rvt_swqe *wqe;
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001367 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001368 enum ib_wc_status status;
1369 unsigned long flags;
1370 int diff;
1371 u32 pad;
1372 u32 aeth;
1373 u64 val;
1374
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001375 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1376 /*
1377 * If ACK'd PSN on SDMA busy list try to make progress to
1378 * reclaim SDMA credits.
1379 */
1380 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1381 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1382
1383 /*
1384 * If send tasklet not running attempt to progress
1385 * SDMA queue.
1386 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001387 if (!(qp->s_flags & RVT_S_BUSY)) {
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001388 /* Acquire SDMA Lock */
1389 spin_lock_irqsave(&ppd->sdma_lock, flags);
1390 /* Invoke sdma make progress */
1391 qib_sdma_make_progress(ppd);
1392 /* Release SDMA Lock */
1393 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1394 }
1395 }
1396 }
1397
Ralph Campbellf9315512010-05-23 21:44:54 -07001398 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001399 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
Mike Marciniszyn414ed902011-02-10 14:11:28 +00001400 goto ack_done;
Ralph Campbellf9315512010-05-23 21:44:54 -07001401
Ralph Campbellf9315512010-05-23 21:44:54 -07001402 /* Ignore invalid responses. */
Mike Marciniszyn46a80d62016-02-14 12:10:04 -08001403 smp_read_barrier_depends(); /* see post_one_send */
1404 if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
Ralph Campbellf9315512010-05-23 21:44:54 -07001405 goto ack_done;
1406
1407 /* Ignore duplicate responses. */
1408 diff = qib_cmp24(psn, qp->s_last_psn);
1409 if (unlikely(diff <= 0)) {
1410 /* Update credits for "ghost" ACKs */
1411 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1412 aeth = be32_to_cpu(ohdr->u.aeth);
1413 if ((aeth >> 29) == 0)
1414 qib_get_credit(qp, aeth);
1415 }
1416 goto ack_done;
1417 }
1418
1419 /*
1420 * Skip everything other than the PSN we expect, if we are waiting
1421 * for a reply to a restarted RDMA read or atomic op.
1422 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001423 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
Ralph Campbellf9315512010-05-23 21:44:54 -07001424 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1425 goto ack_done;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001426 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
Ralph Campbellf9315512010-05-23 21:44:54 -07001427 }
1428
1429 if (unlikely(qp->s_acked == qp->s_tail))
1430 goto ack_done;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001431 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001432 status = IB_WC_SUCCESS;
1433
1434 switch (opcode) {
1435 case OP(ACKNOWLEDGE):
1436 case OP(ATOMIC_ACKNOWLEDGE):
1437 case OP(RDMA_READ_RESPONSE_FIRST):
1438 aeth = be32_to_cpu(ohdr->u.aeth);
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001439 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1440 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1441 else
Ralph Campbellf9315512010-05-23 21:44:54 -07001442 val = 0;
1443 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1444 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1445 goto ack_done;
1446 hdrsize += 4;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001447 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001448 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1449 goto ack_op_err;
1450 /*
1451 * If this is a response to a resent RDMA read, we
1452 * have to be careful to copy the data to the right
1453 * location.
1454 */
1455 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1456 wqe, psn, pmtu);
1457 goto read_middle;
1458
1459 case OP(RDMA_READ_RESPONSE_MIDDLE):
1460 /* no AETH, no ACK */
1461 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1462 goto ack_seq_err;
1463 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1464 goto ack_op_err;
1465read_middle:
1466 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1467 goto ack_len_err;
1468 if (unlikely(pmtu >= qp->s_rdma_read_len))
1469 goto ack_len_err;
1470
1471 /*
1472 * We got a response so update the timeout.
1473 * 4.096 usec. * (1 << qp->timeout)
1474 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001475 qp->s_flags |= RVT_S_TIMER;
Mike Marciniszynd0f2faf2011-09-23 13:16:49 -04001476 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
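		/*
		 * A read response is also forward progress for the request
		 * side: if the send engine was throttled waiting on an ACK,
		 * let it run again.
		 */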
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001477 if (qp->s_flags & RVT_S_WAIT_ACK) {
1478 qp->s_flags &= ~RVT_S_WAIT_ACK;
Ralph Campbellf9315512010-05-23 21:44:54 -07001479 qib_schedule_send(qp);
1480 }
1481
1482 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1483 qp->s_retry = qp->s_retry_cnt;
1484
1485 /*
1486 * Update the RDMA receive state but do the copy w/o
1487 * holding the locks and blocking interrupts.
1488 */
1489 qp->s_rdma_read_len -= pmtu;
1490 update_last_psn(qp, psn);
1491 spin_unlock_irqrestore(&qp->s_lock, flags);
1492 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1493 goto bail;
1494
1495 case OP(RDMA_READ_RESPONSE_ONLY):
1496 aeth = be32_to_cpu(ohdr->u.aeth);
1497 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1498 goto ack_done;
1499 /* Get the number of bytes the message was padded by. */
1500 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1501 /*
1502 * Check that the data size is >= 0 && <= pmtu.
1503 * Remember to account for the AETH header (4) and
1504 * ICRC (4).
1505 */
1506 if (unlikely(tlen < (hdrsize + pad + 8)))
1507 goto ack_len_err;
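		/*
		 * The smallest packet accepted here carries no data at all:
		 * tlen == hdrsize + pad + 8, i.e. just the AETH and ICRC.
		 */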
1508 /*
1509 * If this is a response to a resent RDMA read, we
1510 * have to be careful to copy the data to the right
1511 * location.
1512 */
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
Ralph Campbellf9315512010-05-23 21:44:54 -07001514 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1515 wqe, psn, pmtu);
1516 goto read_last;
1517
1518 case OP(RDMA_READ_RESPONSE_LAST):
1519 /* ACKs READ req. */
1520 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1521 goto ack_seq_err;
1522 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1523 goto ack_op_err;
1524 /* Get the number of bytes the message was padded by. */
1525 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1526 /*
1527 * Check that the data size is >= 1 && <= pmtu.
1528 * Remember to account for the AETH header (4) and
1529 * ICRC (4).
1530 */
1531 if (unlikely(tlen <= (hdrsize + pad + 8)))
1532 goto ack_len_err;
1533read_last:
1534 tlen -= hdrsize + pad + 8;
1535 if (unlikely(tlen != qp->s_rdma_read_len))
1536 goto ack_len_err;
1537 aeth = be32_to_cpu(ohdr->u.aeth);
1538 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1539 WARN_ON(qp->s_rdma_read_sge.num_sge);
1540 (void) do_rc_ack(qp, aeth, psn,
1541 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1542 goto ack_done;
1543 }
1544
1545ack_op_err:
1546 status = IB_WC_LOC_QP_OP_ERR;
1547 goto ack_err;
1548
1549ack_seq_err:
1550 rdma_seq_err(qp, ibp, psn, rcd);
1551 goto ack_done;
1552
1553ack_len_err:
1554 status = IB_WC_LOC_LEN_ERR;
1555ack_err:
1556 if (qp->s_last == qp->s_acked) {
1557 qib_send_complete(qp, wqe, status);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001558 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
Ralph Campbellf9315512010-05-23 21:44:54 -07001559 }
1560ack_done:
1561 spin_unlock_irqrestore(&qp->s_lock, flags);
1562bail:
1563 return;
1564}
1565
1566/**
1567 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1568 * @ohdr: the other headers for this packet
1569 * @data: the packet data
1570 * @qp: the QP for this packet
1571 * @opcode: the opcode for this packet
1572 * @psn: the packet sequence number for this packet
 1573 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 1574 *
1575 * This is called from qib_rc_rcv() to process an unexpected
1576 * incoming RC packet for the given QP.
1577 * Called at interrupt level.
1578 * Return 1 if no more processing is needed; otherwise return 0 to
1579 * schedule a response to be sent.
1580 */
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001581static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
Ralph Campbellf9315512010-05-23 21:44:54 -07001582 void *data,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001583 struct rvt_qp *qp,
Ralph Campbellf9315512010-05-23 21:44:54 -07001584 u32 opcode,
1585 u32 psn,
1586 int diff,
1587 struct qib_ctxtdata *rcd)
1588{
1589 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001590 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07001591 unsigned long flags;
1592 u8 i, prev;
1593 int old_req;
1594
1595 if (diff > 0) {
1596 /*
1597 * Packet sequence error.
1598 * A NAK will ACK earlier sends and RDMA writes.
1599 * Don't queue the NAK if we already sent one.
1600 */
1601 if (!qp->r_nak_state) {
Harish Chegondif24a6d42016-01-22 12:56:02 -08001602 ibp->rvp.n_rc_seqnak++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001603 qp->r_nak_state = IB_NAK_PSN_ERROR;
1604 /* Use the expected PSN. */
1605 qp->r_ack_psn = qp->r_psn;
1606 /*
1607 * Wait to send the sequence NAK until all packets
1608 * in the receive queue have been processed.
1609 * Otherwise, we end up propagating congestion.
1610 */
1611 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001612 qp->r_flags |= RVT_R_RSP_NAK;
Mike Marciniszyn4d6f85c2016-09-06 04:34:35 -07001613 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001614 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1615 }
1616 }
1617 goto done;
1618 }
1619
1620 /*
1621 * Handle a duplicate request. Don't re-execute SEND, RDMA
1622 * write or atomic op. Don't NAK errors, just silently drop
1623 * the duplicate request. Note that r_sge, r_len, and
1624 * r_rcv_len may be in use so don't modify them.
1625 *
1626 * We are supposed to ACK the earliest duplicate PSN but we
1627 * can coalesce an outstanding duplicate ACK. We have to
1628 * send the earliest so that RDMA reads can be restarted at
1629 * the requester's expected PSN.
1630 *
1631 * First, find where this duplicate PSN falls within the
1632 * ACKs previously sent.
1633 * old_req is true if there is an older response that is scheduled
1634 * to be sent before sending this one.
1635 */
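	/*
	 * The loop below walks the ack queue backwards from
	 * r_head_ack_queue, stopping at the first entry whose starting
	 * PSN is at or below the duplicate PSN, or giving up with
	 * e == NULL if it runs out of valid entries first.
	 */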
1636 e = NULL;
1637 old_req = 1;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001638 ibp->rvp.n_rc_dupreq++;
Ralph Campbellf9315512010-05-23 21:44:54 -07001639
1640 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001641
1642 for (i = qp->r_head_ack_queue; ; i = prev) {
1643 if (i == qp->s_tail_ack_queue)
1644 old_req = 0;
1645 if (i)
1646 prev = i - 1;
1647 else
1648 prev = QIB_MAX_RDMA_ATOMIC;
1649 if (prev == qp->r_head_ack_queue) {
1650 e = NULL;
1651 break;
1652 }
1653 e = &qp->s_ack_queue[prev];
1654 if (!e->opcode) {
1655 e = NULL;
1656 break;
1657 }
1658 if (qib_cmp24(psn, e->psn) >= 0) {
1659 if (prev == qp->s_tail_ack_queue &&
1660 qib_cmp24(psn, e->lpsn) <= 0)
1661 old_req = 0;
1662 break;
1663 }
1664 }
1665 switch (opcode) {
1666 case OP(RDMA_READ_REQUEST): {
1667 struct ib_reth *reth;
1668 u32 offset;
1669 u32 len;
1670
1671 /*
1672 * If we didn't find the RDMA read request in the ack queue,
1673 * we can ignore this request.
1674 */
1675 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1676 goto unlock_done;
1677 /* RETH comes after BTH */
1678 reth = &ohdr->u.rc.reth;
1679 /*
1680 * Address range must be a subset of the original
1681 * request and start on pmtu boundaries.
1682 * We reuse the old ack_queue slot since the requester
1683 * should not back up and request an earlier PSN for the
1684 * same request.
1685 */
1686 offset = ((psn - e->psn) & QIB_PSN_MASK) *
Mike Marciniszyncc6ea132011-09-23 13:16:34 -04001687 qp->pmtu;
Ralph Campbellf9315512010-05-23 21:44:54 -07001688 len = be32_to_cpu(reth->length);
1689 if (unlikely(offset + len != e->rdma_sge.sge_length))
1690 goto unlock_done;
1691 if (e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001692 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07001693 e->rdma_sge.mr = NULL;
1694 }
1695 if (len != 0) {
1696 u32 rkey = be32_to_cpu(reth->rkey);
1697 u64 vaddr = be64_to_cpu(reth->vaddr);
1698 int ok;
1699
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001700 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
Ralph Campbellf9315512010-05-23 21:44:54 -07001701 IB_ACCESS_REMOTE_READ);
1702 if (unlikely(!ok))
1703 goto unlock_done;
1704 } else {
1705 e->rdma_sge.vaddr = NULL;
1706 e->rdma_sge.length = 0;
1707 e->rdma_sge.sge_length = 0;
1708 }
1709 e->psn = psn;
1710 if (old_req)
1711 goto unlock_done;
1712 qp->s_tail_ack_queue = prev;
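		/*
		 * Point s_tail_ack_queue back at this entry; with e->psn
		 * updated above, the send engine will regenerate the read
		 * response starting at the duplicate PSN's offset.
		 */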
1713 break;
1714 }
1715
1716 case OP(COMPARE_SWAP):
1717 case OP(FETCH_ADD): {
1718 /*
1719 * If we didn't find the atomic request in the ack queue
1720 * or the send tasklet is already backed up to send an
1721 * earlier entry, we can ignore this request.
1722 */
1723 if (!e || e->opcode != (u8) opcode || old_req)
1724 goto unlock_done;
1725 qp->s_tail_ack_queue = prev;
1726 break;
1727 }
1728
1729 default:
1730 /*
1731 * Ignore this operation if it doesn't request an ACK
1732 * or an earlier RDMA read or atomic is going to be resent.
1733 */
1734 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1735 goto unlock_done;
1736 /*
1737 * Resend the most recent ACK if this request is
1738 * after all the previous RDMA reads and atomics.
1739 */
1740 if (i == qp->r_head_ack_queue) {
1741 spin_unlock_irqrestore(&qp->s_lock, flags);
1742 qp->r_nak_state = 0;
1743 qp->r_ack_psn = qp->r_psn - 1;
1744 goto send_ack;
1745 }
1746 /*
 1747 * Try to send a simple ACK to work around a Mellanox bug
 1748 * in which an RDMA read response or atomic response is not
 1749 * accepted as an ACK for earlier SENDs or RDMA writes.
1750 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001751 if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
Ralph Campbellf9315512010-05-23 21:44:54 -07001752 spin_unlock_irqrestore(&qp->s_lock, flags);
1753 qp->r_nak_state = 0;
1754 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1755 goto send_ack;
1756 }
1757 /*
1758 * Resend the RDMA read or atomic op which
1759 * ACKs this duplicate request.
1760 */
1761 qp->s_tail_ack_queue = i;
1762 break;
1763 }
1764 qp->s_ack_state = OP(ACKNOWLEDGE);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001765 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07001766 qp->r_nak_state = 0;
1767 qib_schedule_send(qp);
1768
1769unlock_done:
1770 spin_unlock_irqrestore(&qp->s_lock, flags);
1771done:
1772 return 1;
1773
1774send_ack:
1775 return 0;
1776}
1777
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001778void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
Ralph Campbellf9315512010-05-23 21:44:54 -07001779{
1780 unsigned long flags;
1781 int lastwqe;
1782
1783 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001784 lastwqe = rvt_error_qp(qp, err);
Ralph Campbellf9315512010-05-23 21:44:54 -07001785 spin_unlock_irqrestore(&qp->s_lock, flags);
1786
1787 if (lastwqe) {
1788 struct ib_event ev;
1789
1790 ev.device = qp->ibqp.device;
1791 ev.element.qp = &qp->ibqp;
1792 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1793 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1794 }
1795}
1796
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001797static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
Ralph Campbellf9315512010-05-23 21:44:54 -07001798{
1799 unsigned next;
1800
1801 next = n + 1;
1802 if (next > QIB_MAX_RDMA_ATOMIC)
1803 next = 0;
1804 qp->s_tail_ack_queue = next;
1805 qp->s_ack_state = OP(ACKNOWLEDGE);
1806}
1807
1808/**
1809 * qib_rc_rcv - process an incoming RC packet
1810 * @rcd: the context pointer
1811 * @hdr: the header of this packet
1812 * @has_grh: true if the header has a GRH
1813 * @data: the packet data
1814 * @tlen: the packet length
1815 * @qp: the QP for this packet
1816 *
1817 * This is called from qib_qp_rcv() to process an incoming RC packet
1818 * for the given QP.
1819 * Called at interrupt level.
1820 */
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001821void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001822 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -07001823{
1824 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001825 struct ib_other_headers *ohdr;
Ralph Campbellf9315512010-05-23 21:44:54 -07001826 u32 opcode;
1827 u32 hdrsize;
1828 u32 psn;
1829 u32 pad;
1830 struct ib_wc wc;
Mike Marciniszyncc6ea132011-09-23 13:16:34 -04001831 u32 pmtu = qp->pmtu;
Ralph Campbellf9315512010-05-23 21:44:54 -07001832 int diff;
1833 struct ib_reth *reth;
1834 unsigned long flags;
1835 int ret;
1836
1837 /* Check for GRH */
1838 if (!has_grh) {
1839 ohdr = &hdr->u.oth;
1840 hdrsize = 8 + 12; /* LRH + BTH */
1841 } else {
1842 ohdr = &hdr->u.l.oth;
1843 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1844 }
1845
1846 opcode = be32_to_cpu(ohdr->bth[0]);
Ralph Campbellf9315512010-05-23 21:44:54 -07001847 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
Mike Marciniszyn9fd54732011-09-23 13:17:00 -04001848 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001849
1850 psn = be32_to_cpu(ohdr->bth[2]);
1851 opcode >>= 24;
1852
Ralph Campbellf9315512010-05-23 21:44:54 -07001853 /*
1854 * Process responses (ACKs) before anything else. Note that the
1855 * packet sequence number will be for something in the send work
1856 * queue rather than the expected receive packet sequence number.
1857 * In other words, this QP is the requester.
1858 */
1859 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1860 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1861 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1862 hdrsize, pmtu, rcd);
Ralph Campbella5210c12010-08-02 22:39:30 +00001863 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001864 }
1865
1866 /* Compute 24 bits worth of difference. */
1867 diff = qib_cmp24(psn, qp->r_psn);
1868 if (unlikely(diff)) {
1869 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
Ralph Campbella5210c12010-08-02 22:39:30 +00001870 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07001871 goto send_ack;
1872 }
1873
1874 /* Check for opcode sequence errors. */
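	/*
	 * MIDDLE/LAST packets are only legal while a message of the same
	 * type is in progress (r_state is the matching FIRST/MIDDLE);
	 * anything else gets an invalid-request NAK.
	 */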
1875 switch (qp->r_state) {
1876 case OP(SEND_FIRST):
1877 case OP(SEND_MIDDLE):
1878 if (opcode == OP(SEND_MIDDLE) ||
1879 opcode == OP(SEND_LAST) ||
1880 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1881 break;
1882 goto nack_inv;
1883
1884 case OP(RDMA_WRITE_FIRST):
1885 case OP(RDMA_WRITE_MIDDLE):
1886 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1887 opcode == OP(RDMA_WRITE_LAST) ||
1888 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1889 break;
1890 goto nack_inv;
1891
1892 default:
1893 if (opcode == OP(SEND_MIDDLE) ||
1894 opcode == OP(SEND_LAST) ||
1895 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1896 opcode == OP(RDMA_WRITE_MIDDLE) ||
1897 opcode == OP(RDMA_WRITE_LAST) ||
1898 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1899 goto nack_inv;
1900 /*
1901 * Note that it is up to the requester to not send a new
1902 * RDMA read or atomic operation before receiving an ACK
1903 * for the previous operation.
1904 */
1905 break;
1906 }
1907
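	/*
	 * The first request that arrives while the QP is still in RTR
	 * implicitly establishes the connection, so tell the consumer
	 * about it once via IB_EVENT_COMM_EST.
	 */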
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001908 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
1909 qp->r_flags |= RVT_R_COMM_EST;
Ralph Campbellf9315512010-05-23 21:44:54 -07001910 if (qp->ibqp.event_handler) {
1911 struct ib_event ev;
1912
1913 ev.device = qp->ibqp.device;
1914 ev.element.qp = &qp->ibqp;
1915 ev.event = IB_EVENT_COMM_EST;
1916 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1917 }
1918 }
1919
1920 /* OK, process the packet. */
1921 switch (opcode) {
1922 case OP(SEND_FIRST):
1923 ret = qib_get_rwqe(qp, 0);
1924 if (ret < 0)
1925 goto nack_op_err;
1926 if (!ret)
1927 goto rnr_nak;
1928 qp->r_rcv_len = 0;
1929 /* FALLTHROUGH */
1930 case OP(SEND_MIDDLE):
1931 case OP(RDMA_WRITE_MIDDLE):
1932send_middle:
 1933 /* Check for invalid length: one full PMTU, within the posted rwqe len. */
1934 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1935 goto nack_inv;
1936 qp->r_rcv_len += pmtu;
1937 if (unlikely(qp->r_rcv_len > qp->r_len))
1938 goto nack_inv;
1939 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1940 break;
1941
1942 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1943 /* consume RWQE */
1944 ret = qib_get_rwqe(qp, 1);
1945 if (ret < 0)
1946 goto nack_op_err;
1947 if (!ret)
1948 goto rnr_nak;
1949 goto send_last_imm;
1950
1951 case OP(SEND_ONLY):
1952 case OP(SEND_ONLY_WITH_IMMEDIATE):
1953 ret = qib_get_rwqe(qp, 0);
1954 if (ret < 0)
1955 goto nack_op_err;
1956 if (!ret)
1957 goto rnr_nak;
1958 qp->r_rcv_len = 0;
1959 if (opcode == OP(SEND_ONLY))
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04001960 goto no_immediate_data;
1961 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
Ralph Campbellf9315512010-05-23 21:44:54 -07001962 case OP(SEND_LAST_WITH_IMMEDIATE):
1963send_last_imm:
1964 wc.ex.imm_data = ohdr->u.imm_data;
1965 hdrsize += 4;
1966 wc.wc_flags = IB_WC_WITH_IMM;
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04001967 goto send_last;
Ralph Campbellf9315512010-05-23 21:44:54 -07001968 case OP(SEND_LAST):
1969 case OP(RDMA_WRITE_LAST):
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04001970no_immediate_data:
1971 wc.wc_flags = 0;
1972 wc.ex.imm_data = 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07001973send_last:
1974 /* Get the number of bytes the message was padded by. */
1975 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1976 /* Check for invalid length. */
1977 /* XXX LAST len should be >= 1 */
1978 if (unlikely(tlen < (hdrsize + pad + 4)))
1979 goto nack_inv;
1980 /* Don't count the CRC. */
1981 tlen -= (hdrsize + pad + 4);
1982 wc.byte_len = tlen + qp->r_rcv_len;
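		/*
		 * r_rcv_len already counts the payload copied by any FIRST/
		 * MIDDLE packets of this message, so byte_len reports the
		 * whole message length in the completion.
		 */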
1983 if (unlikely(wc.byte_len > qp->r_len))
1984 goto nack_inv;
1985 qib_copy_sge(&qp->r_sge, data, tlen, 1);
Harish Chegondi70696ea2016-02-03 14:20:27 -08001986 rvt_put_ss(&qp->r_sge);
Ralph Campbellf9315512010-05-23 21:44:54 -07001987 qp->r_msn++;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001988 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
Ralph Campbellf9315512010-05-23 21:44:54 -07001989 break;
1990 wc.wr_id = qp->r_wr_id;
1991 wc.status = IB_WC_SUCCESS;
1992 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1993 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1994 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1995 else
1996 wc.opcode = IB_WC_RECV;
1997 wc.qp = &qp->ibqp;
1998 wc.src_qp = qp->remote_qpn;
1999 wc.slid = qp->remote_ah_attr.dlid;
2000 wc.sl = qp->remote_ah_attr.sl;
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002001 /* zero fields that are N/A */
2002 wc.vendor_err = 0;
2003 wc.pkey_index = 0;
2004 wc.dlid_path_bits = 0;
2005 wc.port_num = 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07002006 /* Signal completion event if the solicited bit is set. */
Harish Chegondi4bb88e52016-01-22 13:07:36 -08002007 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
Ralph Campbellf9315512010-05-23 21:44:54 -07002008 (ohdr->bth[0] &
2009 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
2010 break;
2011
2012 case OP(RDMA_WRITE_FIRST):
2013 case OP(RDMA_WRITE_ONLY):
2014 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2015 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2016 goto nack_inv;
2017 /* consume RWQE */
2018 reth = &ohdr->u.rc.reth;
2019 hdrsize += sizeof(*reth);
2020 qp->r_len = be32_to_cpu(reth->length);
2021 qp->r_rcv_len = 0;
2022 qp->r_sge.sg_list = NULL;
2023 if (qp->r_len != 0) {
2024 u32 rkey = be32_to_cpu(reth->rkey);
2025 u64 vaddr = be64_to_cpu(reth->vaddr);
2026 int ok;
2027
2028 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002029 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
Ralph Campbellf9315512010-05-23 21:44:54 -07002030 rkey, IB_ACCESS_REMOTE_WRITE);
2031 if (unlikely(!ok))
2032 goto nack_acc;
2033 qp->r_sge.num_sge = 1;
2034 } else {
2035 qp->r_sge.num_sge = 0;
2036 qp->r_sge.sge.mr = NULL;
2037 qp->r_sge.sge.vaddr = NULL;
2038 qp->r_sge.sge.length = 0;
2039 qp->r_sge.sge.sge_length = 0;
2040 }
2041 if (opcode == OP(RDMA_WRITE_FIRST))
2042 goto send_middle;
2043 else if (opcode == OP(RDMA_WRITE_ONLY))
Mike Marciniszyn2fc109c2011-09-23 13:16:29 -04002044 goto no_immediate_data;
Ralph Campbellf9315512010-05-23 21:44:54 -07002045 ret = qib_get_rwqe(qp, 1);
2046 if (ret < 0)
2047 goto nack_op_err;
2048 if (!ret)
2049 goto rnr_nak;
Jason Gunthorpe5715f5d2010-10-22 22:00:48 +00002050 wc.ex.imm_data = ohdr->u.rc.imm_data;
2051 hdrsize += 4;
2052 wc.wc_flags = IB_WC_WITH_IMM;
2053 goto send_last;
Ralph Campbellf9315512010-05-23 21:44:54 -07002054
2055 case OP(RDMA_READ_REQUEST): {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002056 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07002057 u32 len;
2058 u8 next;
2059
2060 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2061 goto nack_inv;
2062 next = qp->r_head_ack_queue + 1;
2063 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
2064 if (next > QIB_MAX_RDMA_ATOMIC)
2065 next = 0;
2066 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07002067 if (unlikely(next == qp->s_tail_ack_queue)) {
2068 if (!qp->s_ack_queue[next].sent)
2069 goto nack_inv_unlck;
2070 qib_update_ack_queue(qp, next);
2071 }
2072 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2073 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002074 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002075 e->rdma_sge.mr = NULL;
2076 }
2077 reth = &ohdr->u.rc.reth;
2078 len = be32_to_cpu(reth->length);
2079 if (len) {
2080 u32 rkey = be32_to_cpu(reth->rkey);
2081 u64 vaddr = be64_to_cpu(reth->vaddr);
2082 int ok;
2083
2084 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002085 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
Ralph Campbellf9315512010-05-23 21:44:54 -07002086 rkey, IB_ACCESS_REMOTE_READ);
2087 if (unlikely(!ok))
2088 goto nack_acc_unlck;
2089 /*
2090 * Update the next expected PSN. We add 1 later
2091 * below, so only add the remainder here.
2092 */
2093 if (len > pmtu)
2094 qp->r_psn += (len - 1) / pmtu;
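			/*
			 * Example: a read of 2.5 * pmtu bytes takes three
			 * response packets, so (len - 1) / pmtu adds 2 here
			 * and the common r_psn++ below supplies the third.
			 */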
2095 } else {
2096 e->rdma_sge.mr = NULL;
2097 e->rdma_sge.vaddr = NULL;
2098 e->rdma_sge.length = 0;
2099 e->rdma_sge.sge_length = 0;
2100 }
2101 e->opcode = opcode;
2102 e->sent = 0;
2103 e->psn = psn;
2104 e->lpsn = qp->r_psn;
2105 /*
2106 * We need to increment the MSN here instead of when we
2107 * finish sending the result since a duplicate request would
2108 * increment it more than once.
2109 */
2110 qp->r_msn++;
2111 qp->r_psn++;
2112 qp->r_state = opcode;
2113 qp->r_nak_state = 0;
2114 qp->r_head_ack_queue = next;
2115
2116 /* Schedule the send tasklet. */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002117 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07002118 qib_schedule_send(qp);
2119
Ralph Campbella5210c12010-08-02 22:39:30 +00002120 goto sunlock;
Ralph Campbellf9315512010-05-23 21:44:54 -07002121 }
2122
2123 case OP(COMPARE_SWAP):
2124 case OP(FETCH_ADD): {
2125 struct ib_atomic_eth *ateth;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002126 struct rvt_ack_entry *e;
Ralph Campbellf9315512010-05-23 21:44:54 -07002127 u64 vaddr;
2128 atomic64_t *maddr;
2129 u64 sdata;
2130 u32 rkey;
2131 u8 next;
2132
2133 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2134 goto nack_inv;
2135 next = qp->r_head_ack_queue + 1;
2136 if (next > QIB_MAX_RDMA_ATOMIC)
2137 next = 0;
2138 spin_lock_irqsave(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07002139 if (unlikely(next == qp->s_tail_ack_queue)) {
2140 if (!qp->s_ack_queue[next].sent)
2141 goto nack_inv_unlck;
2142 qib_update_ack_queue(qp, next);
2143 }
2144 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2145 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002146 rvt_put_mr(e->rdma_sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002147 e->rdma_sge.mr = NULL;
2148 }
2149 ateth = &ohdr->u.atomic_eth;
Mike Marciniszyn261a4352016-09-06 04:35:05 -07002150 vaddr = get_ib_ateth_vaddr(ateth);
Ralph Campbellf9315512010-05-23 21:44:54 -07002151 if (unlikely(vaddr & (sizeof(u64) - 1)))
2152 goto nack_inv_unlck;
2153 rkey = be32_to_cpu(ateth->rkey);
2154 /* Check rkey & NAK */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002155 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
Ralph Campbellf9315512010-05-23 21:44:54 -07002156 vaddr, rkey,
2157 IB_ACCESS_REMOTE_ATOMIC)))
2158 goto nack_acc_unlck;
2159 /* Perform atomic OP and save result. */
2160 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
Mike Marciniszyn261a4352016-09-06 04:35:05 -07002161 sdata = get_ib_ateth_swap(ateth);
Ralph Campbellf9315512010-05-23 21:44:54 -07002162 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2163 (u64) atomic64_add_return(sdata, maddr) - sdata :
2164 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
Mike Marciniszyn261a4352016-09-06 04:35:05 -07002165 get_ib_ateth_compare(ateth),
Ralph Campbellf9315512010-05-23 21:44:54 -07002166 sdata);
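		/*
		 * The atomic is emulated on the host: FETCH_ADD subtracts
		 * sdata from atomic64_add_return() to recover the pre-add
		 * value, and COMPARE_SWAP takes whatever cmpxchg() found at
		 * the target.  Either way the original value is kept in
		 * e->atomic_data for the atomic ACK.
		 */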
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002167 rvt_put_mr(qp->r_sge.sge.mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07002168 qp->r_sge.num_sge = 0;
2169 e->opcode = opcode;
2170 e->sent = 0;
2171 e->psn = psn;
2172 e->lpsn = psn;
2173 qp->r_msn++;
2174 qp->r_psn++;
2175 qp->r_state = opcode;
2176 qp->r_nak_state = 0;
2177 qp->r_head_ack_queue = next;
2178
2179 /* Schedule the send tasklet. */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002180 qp->s_flags |= RVT_S_RESP_PENDING;
Ralph Campbellf9315512010-05-23 21:44:54 -07002181 qib_schedule_send(qp);
2182
Ralph Campbella5210c12010-08-02 22:39:30 +00002183 goto sunlock;
Ralph Campbellf9315512010-05-23 21:44:54 -07002184 }
2185
2186 default:
2187 /* NAK unknown opcodes. */
2188 goto nack_inv;
2189 }
2190 qp->r_psn++;
2191 qp->r_state = opcode;
2192 qp->r_ack_psn = psn;
2193 qp->r_nak_state = 0;
2194 /* Send an ACK if requested or required. */
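	/*
	 * Bit 31 of bth[2] (still present in psn here) is the BTH AckReq
	 * bit, the same bit tested as IB_BTH_REQ_ACK on the duplicate
	 * request path above.
	 */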
2195 if (psn & (1 << 31))
2196 goto send_ack;
Ralph Campbella5210c12010-08-02 22:39:30 +00002197 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002198
2199rnr_nak:
2200 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2201 qp->r_ack_psn = qp->r_psn;
2202 /* Queue RNR NAK for later */
2203 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002204 qp->r_flags |= RVT_R_RSP_NAK;
Mike Marciniszyn4d6f85c2016-09-06 04:34:35 -07002205 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07002206 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2207 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002208 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002209
2210nack_op_err:
2211 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2212 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2213 qp->r_ack_psn = qp->r_psn;
2214 /* Queue NAK for later */
2215 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002216 qp->r_flags |= RVT_R_RSP_NAK;
Mike Marciniszyn4d6f85c2016-09-06 04:34:35 -07002217 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07002218 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2219 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002220 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002221
2222nack_inv_unlck:
2223 spin_unlock_irqrestore(&qp->s_lock, flags);
2224nack_inv:
2225 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2226 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2227 qp->r_ack_psn = qp->r_psn;
2228 /* Queue NAK for later */
2229 if (list_empty(&qp->rspwait)) {
Harish Chegondi01ba79d2016-01-22 12:56:46 -08002230 qp->r_flags |= RVT_R_RSP_NAK;
Mike Marciniszyn4d6f85c2016-09-06 04:34:35 -07002231 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07002232 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2233 }
Ralph Campbella5210c12010-08-02 22:39:30 +00002234 return;
Ralph Campbellf9315512010-05-23 21:44:54 -07002235
2236nack_acc_unlck:
2237 spin_unlock_irqrestore(&qp->s_lock, flags);
2238nack_acc:
2239 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2240 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2241 qp->r_ack_psn = qp->r_psn;
2242send_ack:
2243 qib_send_rc_ack(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07002244 return;
2245
2246sunlock:
2247 spin_unlock_irqrestore(&qp->s_lock, flags);
2248}