/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

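/**
 * restart_sge - reposition a send SGE state to resend from a given PSN
 * @ss: the SGE state to initialize
 * @wqe: the work request to resend from
 * @psn: the packet sequence number to restart at
 * @pmtu: the path MTU
 *
 * Each PSN past wqe->psn carries one pMTU of payload, so the restart
 * offset is ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu bytes into the WQE.
 * Return the number of bytes still to be sent.
 */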
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

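	/*
	 * Resume wherever the last response left off.  Note that
	 * s_tail_ack_queue is only advanced once the previous response
	 * has actually been sent, not merely constructed, so a resend
	 * can still find its queue entry.
	 */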
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
					    (qp->s_nak_state <<
					     RVT_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
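	/*
	 * The s_state switch resumes construction of a multi-packet
	 * request wherever the previous packet for this WQE left off;
	 * the default case starts a new request (or resends an old one)
	 * from the current SWQE.
	 */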
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			smp_read_barrier_depends(); /* see post_one_send() */
			if (qp->s_tail == READ_ONCE(qp->s_head))
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
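			/* A read request itself carries no payload. */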
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate an RDMA write needs to be restarted
		 * from an earlier PSN without interfering with the sending
		 * thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate an RDMA read needs to be restarted
		 * from an earlier PSN without interfering with the sending
		 * thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
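	/*
	 * Sign-extend the 24-bit PSN delta and request an ACK at least
	 * every QIB_PSN_CREDIT packets within a request.
	 */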
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_send_rc_ack - construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
					    (qp->r_nak_state <<
					     RVT_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

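	/*
	 * On chips that need it, write the special trigger value at the
	 * magic offset to start transmission of the PIO buffer.
	 */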
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
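	/*
	 * Fall back to sending the ACK from the send engine: record the
	 * ACK/NAK state in the QP and let qib_make_rc_ack() emit it.
	 */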
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}

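/* Record the last PSN for which we received a valid response. */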
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include any ACK'ed requests.
	 */
	ack_psn = psn;
	if (aeth >> RVT_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
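		/*
		 * An atomic response returns the prior contents of the
		 * remote location in the ACK; deliver it to the
		 * requester's destination buffer.
		 */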
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> RVT_AETH_NAK_SHIFT) {
	case 0: /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * reset the retransmit timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (qib_cmp24(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1: /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
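		/* An RNR retry count of 7 means "retry forever". */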
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3: /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> RVT_AETH_CREDIT_SHIFT) &
			RVT_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				qib_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default: /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
1272
1273/**
1274 * qib_rc_rcv_resp - process an incoming RC response packet
1275 * @ibp: the port this packet came in on
1276 * @ohdr: the other headers for this packet
1277 * @data: the packet data
1278 * @tlen: the packet length
1279 * @qp: the QP for this packet
1280 * @opcode: the opcode for this packet
1281 * @psn: the packet sequence number for this packet
1282 * @hdrsize: the header length
1283 * @pmtu: the path MTU
1284 *
1285 * This is called from qib_rc_rcv() to process an incoming RC response
1286 * packet for the given QP.
1287 * Called at interrupt level.
1288 */
1289static void qib_rc_rcv_resp(struct qib_ibport *ibp,
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001290 struct ib_other_headers *ohdr,
Ralph Campbellf9315512010-05-23 21:44:54 -07001291 void *data, u32 tlen,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001292 struct rvt_qp *qp,
Ralph Campbellf9315512010-05-23 21:44:54 -07001293 u32 opcode,
1294 u32 psn, u32 hdrsize, u32 pmtu,
1295 struct qib_ctxtdata *rcd)
1296{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001297 struct rvt_swqe *wqe;
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001298 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001299 enum ib_wc_status status;
1300 unsigned long flags;
1301 int diff;
1302 u32 pad;
1303 u32 aeth;
1304 u64 val;
1305
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001306 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1307 /*
1308 * If ACK'd PSN on SDMA busy list try to make progress to
1309 * reclaim SDMA credits.
1310 */
1311 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1312 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1313
1314 /*
1315 * If send tasklet not running attempt to progress
1316 * SDMA queue.
1317 */
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001318 if (!(qp->s_flags & RVT_S_BUSY)) {
Mike Marciniszyndd04e432011-01-10 17:42:22 -08001319 /* Acquire SDMA Lock */
1320 spin_lock_irqsave(&ppd->sdma_lock, flags);
1321 /* Invoke sdma make progress */
1322 qib_sdma_make_progress(ppd);
1323 /* Release SDMA Lock */
1324 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1325 }
1326 }
1327 }
1328
Ralph Campbellf9315512010-05-23 21:44:54 -07001329 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001330 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
Mike Marciniszyn414ed902011-02-10 14:11:28 +00001331 goto ack_done;
Ralph Campbellf9315512010-05-23 21:44:54 -07001332
Ralph Campbellf9315512010-05-23 21:44:54 -07001333 /* Ignore invalid responses. */
Mike Marciniszyn46a80d62016-02-14 12:10:04 -08001334 smp_read_barrier_depends(); /* see post_one_send */
Mike Marciniszyneb04ff02017-02-08 05:26:08 -08001335 if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
Ralph Campbellf9315512010-05-23 21:44:54 -07001336 goto ack_done;
1337
1338 /* Ignore duplicate responses. */
1339 diff = qib_cmp24(psn, qp->s_last_psn);
1340 if (unlikely(diff <= 0)) {
1341 /* Update credits for "ghost" ACKs */
1342 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1343 aeth = be32_to_cpu(ohdr->u.aeth);
Brian Welty696513e2017-02-08 05:27:07 -08001344 if ((aeth >> RVT_AETH_NAK_SHIFT) == 0)
1345 rvt_get_credit(qp, aeth);
Ralph Campbellf9315512010-05-23 21:44:54 -07001346 }
1347 goto ack_done;
1348 }

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
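		/*
		 * A middle read response must carry exactly one pmtu of
		 * payload: header + pmtu + 4 bytes of ICRC.
		 */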
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
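		/* e.g. qp->timeout == 14 arms the timer for 4.096 usec * 2^14, ~67 msec */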
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
read_last:
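		/* Strip the header, pad, AETH (4), and ICRC (4) to leave the payload. */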
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		qib_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
			    void *data,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn,
			    int diff,
			    struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= RVT_R_RSP_NAK;
				rvt_get_qp(qp);
				list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
			}
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

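	/*
	 * Scan backwards from the newest queued response
	 * (r_head_ack_queue) toward the one currently being sent
	 * (s_tail_ack_queue).  s_ack_queue is a ring of
	 * QIB_MAX_RDMA_ATOMIC + 1 entries, so the scan index wraps
	 * through QIB_MAX_RDMA_ATOMIC.
	 */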
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) *
			qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept an RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

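/*
 * Make room in the ACK queue: called when a new request would reuse the
 * oldest slot in s_ack_queue while that entry has already been sent.
 * Advance s_tail_ack_queue past entry @n so the slot can be overwritten,
 * and reset s_ack_state so the send engine re-evaluates what to send.
 */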
static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > QIB_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

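	/*
	 * BTH word 0 carries the opcode in bits 31:24 and the pad count
	 * in bits 21:20; BTH word 2 carries the AckReq bit (bit 31) and
	 * the 24-bit PSN.
	 */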
	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
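			/*
			 * Together with the increment below, r_psn
			 * advances by DIV_ROUND_UP(len, pmtu) in all,
			 * one PSN per read response packet.
			 */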
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
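		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */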
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
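		/*
		 * Either way e->atomic_data now holds the value the target
		 * location held *before* the operation; that is what is
		 * returned to the requester in the ATOMIC_ACKNOWLEDGE.
		 */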
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
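	/* (1 << 31) in the saved BTH word 2 is the AckReq bit (IB_BTH_REQ_ACK). */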
	if (psn & (1 << 31))
		goto send_ack;
	return;

rnr_nak:
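	/*
	 * An RNR NAK encodes the RNR syndrome (IB_RNR_NAK) together with
	 * the QP's 5-bit minimum RNR timer value.
	 */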
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	qib_send_rc_ack(qp);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}