/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

enum comp_state {
        COMPST_GET_ACK,
        COMPST_GET_WQE,
        COMPST_COMP_WQE,
        COMPST_COMP_ACK,
        COMPST_CHECK_PSN,
        COMPST_CHECK_ACK,
        COMPST_READ,
        COMPST_ATOMIC,
        COMPST_WRITE_SEND,
        COMPST_UPDATE_COMP,
        COMPST_ERROR_RETRY,
        COMPST_RNR_RETRY,
        COMPST_ERROR,
        COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
        COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
        [COMPST_GET_ACK]        = "GET ACK",
        [COMPST_GET_WQE]        = "GET WQE",
        [COMPST_COMP_WQE]       = "COMP WQE",
        [COMPST_COMP_ACK]       = "COMP ACK",
        [COMPST_CHECK_PSN]      = "CHECK PSN",
        [COMPST_CHECK_ACK]      = "CHECK ACK",
        [COMPST_READ]           = "READ",
        [COMPST_ATOMIC]         = "ATOMIC",
        [COMPST_WRITE_SEND]     = "WRITE/SEND",
        [COMPST_UPDATE_COMP]    = "UPDATE COMP",
        [COMPST_ERROR_RETRY]    = "ERROR RETRY",
        [COMPST_RNR_RETRY]      = "RNR RETRY",
        [COMPST_ERROR]          = "ERROR",
        [COMPST_EXIT]           = "EXIT",
        [COMPST_DONE]           = "DONE",
};

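/* RNR NAK delay values in microseconds, indexed by the 5-bit timer field
 * (IB_RNR_TIMER_*) carried in the AETH of an RNR NAK.
 */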
static unsigned long rnrnak_usec[32] = {
        [IB_RNR_TIMER_655_36] = 655360,
        [IB_RNR_TIMER_000_01] = 10,
        [IB_RNR_TIMER_000_02] = 20,
        [IB_RNR_TIMER_000_03] = 30,
        [IB_RNR_TIMER_000_04] = 40,
        [IB_RNR_TIMER_000_06] = 60,
        [IB_RNR_TIMER_000_08] = 80,
        [IB_RNR_TIMER_000_12] = 120,
        [IB_RNR_TIMER_000_16] = 160,
        [IB_RNR_TIMER_000_24] = 240,
        [IB_RNR_TIMER_000_32] = 320,
        [IB_RNR_TIMER_000_48] = 480,
        [IB_RNR_TIMER_000_64] = 640,
        [IB_RNR_TIMER_000_96] = 960,
        [IB_RNR_TIMER_001_28] = 1280,
        [IB_RNR_TIMER_001_92] = 1920,
        [IB_RNR_TIMER_002_56] = 2560,
        [IB_RNR_TIMER_003_84] = 3840,
        [IB_RNR_TIMER_005_12] = 5120,
        [IB_RNR_TIMER_007_68] = 7680,
        [IB_RNR_TIMER_010_24] = 10240,
        [IB_RNR_TIMER_015_36] = 15360,
        [IB_RNR_TIMER_020_48] = 20480,
        [IB_RNR_TIMER_030_72] = 30720,
        [IB_RNR_TIMER_040_96] = 40960,
        [IB_RNR_TIMER_061_44] = 61440,
        [IB_RNR_TIMER_081_92] = 81920,
        [IB_RNR_TIMER_122_88] = 122880,
        [IB_RNR_TIMER_163_84] = 163840,
        [IB_RNR_TIMER_245_76] = 245760,
        [IB_RNR_TIMER_327_68] = 327680,
        [IB_RNR_TIMER_491_52] = 491520,
};

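/* Convert an RNR NAK timer code to jiffies, rounding up to at least one
 * jiffy so the rnr_nak_timer always arms.
 */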
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
        return max_t(unsigned long,
                     usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

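/* Map a send WR opcode to the WC opcode reported in its completion;
 * returns 0xff for opcodes that have no matching WC opcode.
 */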
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:                  return IB_WC_RDMA_WRITE;
        case IB_WR_RDMA_WRITE_WITH_IMM:         return IB_WC_RDMA_WRITE;
        case IB_WR_SEND:                        return IB_WC_SEND;
        case IB_WR_SEND_WITH_IMM:               return IB_WC_SEND;
        case IB_WR_RDMA_READ:                   return IB_WC_RDMA_READ;
        case IB_WR_ATOMIC_CMP_AND_SWP:          return IB_WC_COMP_SWAP;
        case IB_WR_ATOMIC_FETCH_AND_ADD:        return IB_WC_FETCH_ADD;
        case IB_WR_LSO:                         return IB_WC_LSO;
        case IB_WR_SEND_WITH_INV:               return IB_WC_SEND;
        case IB_WR_RDMA_READ_WITH_INV:          return IB_WC_RDMA_READ;
        case IB_WR_LOCAL_INV:                   return IB_WC_LOCAL_INV;
        case IB_WR_REG_MR:                      return IB_WC_REG_MR;

        default:
                return 0xff;
        }
}

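/* Retransmit timer callback: flag a completer timeout and reschedule the
 * completer task so it can decide whether to retry the send queue.
 */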
void retransmit_timer(unsigned long data)
{
        struct rxe_qp *qp = (struct rxe_qp *)data;

        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_run_task(&qp->comp.task, 1);
        }
}

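/* Queue a received response packet for the completer and run its task;
 * defer to the tasklet (rather than run inline) if packets are already
 * pending.
 */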
void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
                        struct sk_buff *skb)
{
        int must_sched;

        skb_queue_tail(&qp->resp_pkts, skb);

        must_sched = skb_queue_len(&qp->resp_pkts) > 1;
        rxe_run_task(&qp->comp.task, must_sched);
}

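/* Look at the send queue head and choose the next completer state based on
 * the WQE state and whether a response packet is in hand.
 */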
static inline enum comp_state get_wqe(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt,
                                      struct rxe_send_wqe **wqe_p)
{
        struct rxe_send_wqe *wqe;

        /* we come here whether or not we found a response packet to see if
         * there are any posted WQEs
         */
        wqe = queue_head(qp->sq.queue);
        *wqe_p = wqe;

        /* no WQE or requester has not started it yet */
        if (!wqe || wqe->state == wqe_state_posted)
                return pkt ? COMPST_DONE : COMPST_EXIT;

        /* WQE does not require an ack */
        if (wqe->state == wqe_state_done)
                return COMPST_COMP_WQE;

        /* WQE caused an error */
        if (wqe->state == wqe_state_error)
                return COMPST_ERROR;

        /* we have a WQE, if we also have an ack check its PSN */
        return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
        qp->comp.retry_cnt = qp->attr.retry_cnt;
        qp->comp.rnr_retry = qp->attr.rnr_retry;
}

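/* Compare the response PSN against the oldest outstanding WQE and the
 * expected completer PSN to classify the packet (duplicate, expected, or
 * one that forces a retry).
 */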
static inline enum comp_state check_psn(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        struct rxe_send_wqe *wqe)
{
        s32 diff;

        /* check to see if response is past the oldest WQE. if it is, complete
         * send/write or error read/atomic
         */
        diff = psn_compare(pkt->psn, wqe->last_psn);
        if (diff > 0) {
                if (wqe->state == wqe_state_pending) {
                        if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
                                return COMPST_ERROR_RETRY;

                        reset_retry_counters(qp);
                        return COMPST_COMP_WQE;
                } else {
                        return COMPST_DONE;
                }
        }

        /* compare response packet to expected response */
        diff = psn_compare(pkt->psn, qp->comp.psn);
        if (diff < 0) {
                /* response is most likely a retried packet; if it matches an
                 * uncompleted WQE go complete it, else ignore it
                 */
                if (pkt->psn == wqe->last_psn)
                        return COMPST_COMP_ACK;
                else
                        return COMPST_DONE;
        } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
                return COMPST_ERROR_RETRY;
        } else {
                return COMPST_CHECK_ACK;
        }
}

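/* Validate a response packet against the expected opcode sequence and the
 * WQE it acknowledges, then pick the state that consumes it (read data,
 * atomic result, plain ack, RNR retry or error).
 */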
static inline enum comp_state check_ack(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        struct rxe_send_wqe *wqe)
{
        unsigned int mask = pkt->mask;
        u8 syn;

        /* Check the sequence only */
        switch (qp->comp.opcode) {
        case -1:
                /* Will catch all *_ONLY cases. */
                if (!(mask & RXE_START_MASK))
                        return COMPST_ERROR;

                break;

        case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
                if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
                    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
                        /* read retries of partial data may restart from
                         * read response first or response only.
                         */
                        if ((pkt->psn == wqe->first_psn &&
                             pkt->opcode ==
                             IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
                            (wqe->first_psn == wqe->last_psn &&
                             pkt->opcode ==
                             IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
                                break;

                        return COMPST_ERROR;
                }
                break;
        default:
                WARN_ON(1);
        }

        /* Check operation validity. */
        switch (pkt->opcode) {
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
                syn = aeth_syn(pkt);

                if ((syn & AETH_TYPE_MASK) != AETH_ACK)
                        return COMPST_ERROR;

                /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
                 * doesn't have an AETH)
                 */
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
                if (wqe->wr.opcode != IB_WR_RDMA_READ &&
                    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
                        wqe->status = IB_WC_FATAL_ERR;
                        return COMPST_ERROR;
                }
                reset_retry_counters(qp);
                return COMPST_READ;

        case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
                syn = aeth_syn(pkt);

                if ((syn & AETH_TYPE_MASK) != AETH_ACK)
                        return COMPST_ERROR;

                if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
                    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
                        return COMPST_ERROR;
                reset_retry_counters(qp);
                return COMPST_ATOMIC;

        case IB_OPCODE_RC_ACKNOWLEDGE:
                syn = aeth_syn(pkt);
                switch (syn & AETH_TYPE_MASK) {
                case AETH_ACK:
                        reset_retry_counters(qp);
                        return COMPST_WRITE_SEND;

                case AETH_RNR_NAK:
                        return COMPST_RNR_RETRY;

                case AETH_NAK:
                        switch (syn) {
                        case AETH_NAK_PSN_SEQ_ERROR:
                                /* a nak implicitly acks all packets with psns
                                 * before
                                 */
                                if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
                                        qp->comp.psn = pkt->psn;
                                        if (qp->req.wait_psn) {
                                                qp->req.wait_psn = 0;
                                                rxe_run_task(&qp->req.task, 1);
                                        }
                                }
                                return COMPST_ERROR_RETRY;

                        case AETH_NAK_INVALID_REQ:
                                wqe->status = IB_WC_REM_INV_REQ_ERR;
                                return COMPST_ERROR;

                        case AETH_NAK_REM_ACC_ERR:
                                wqe->status = IB_WC_REM_ACCESS_ERR;
                                return COMPST_ERROR;

                        case AETH_NAK_REM_OP_ERR:
                                wqe->status = IB_WC_REM_OP_ERR;
                                return COMPST_ERROR;

                        default:
                                pr_warn("unexpected nak %x\n", syn);
                                wqe->status = IB_WC_REM_OP_ERR;
                                return COMPST_ERROR;
                        }

                default:
                        return COMPST_ERROR;
                }
                break;

        default:
                pr_warn("unexpected opcode\n");
        }

        return COMPST_ERROR;
}

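/* Copy RDMA read response payload into the WQE's local DMA buffers. */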
static inline enum comp_state do_read(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt,
                                      struct rxe_send_wqe *wqe)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int ret;

        ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, payload_addr(pkt),
                        payload_size(pkt), to_mem_obj, NULL);
        if (ret)
                return COMPST_ERROR;

        if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
                return COMPST_COMP_ACK;
        else
                return COMPST_UPDATE_COMP;
}

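/* Copy the original value returned in an atomic ack into the WQE's local
 * buffer.
 */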
static inline enum comp_state do_atomic(struct rxe_qp *qp,
                                         struct rxe_pkt_info *pkt,
                                         struct rxe_send_wqe *wqe)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int ret;

        u64 atomic_orig = atmack_orig(pkt);

        ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, &atomic_orig,
                        sizeof(u64), to_mem_obj, NULL);
        if (ret)
                return COMPST_ERROR;
        else
                return COMPST_COMP_ACK;
}

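/* Build a send CQE from the WQE, using the kernel ib_wc layout for kernel
 * QPs and the uverbs layout for user QPs.
 */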
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                          struct rxe_cqe *cqe)
{
        memset(cqe, 0, sizeof(*cqe));

        if (!qp->is_user) {
                struct ib_wc *wc = &cqe->ibwc;

                wc->wr_id = wqe->wr.wr_id;
                wc->status = wqe->status;
                wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
                        wc->wc_flags = IB_WC_WITH_IMM;
                wc->byte_len = wqe->dma.length;
                wc->qp = &qp->ibqp;
        } else {
                struct ib_uverbs_wc *uwc = &cqe->uibwc;

                uwc->wr_id = wqe->wr.wr_id;
                uwc->status = wqe->status;
                uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
                        uwc->wc_flags = IB_WC_WITH_IMM;
                uwc->byte_len = wqe->dma.length;
                uwc->qp_num = qp->ibqp.qp_num;
        }
}

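/* Retire the WQE: post a CQE if required (signaled WR, all-signaled QP or
 * error state), advance the send queue consumer and unblock a requester
 * that is waiting on a fence.
 */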
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        struct rxe_cqe cqe;

        if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            (qp->req.state == QP_STATE_ERROR)) {
                make_send_cqe(qp, wqe, &cqe);
                rxe_cq_post(qp->scq, &cqe, 0);
        }

        advance_consumer(qp->sq.queue);

        /*
         * we completed something so let req run again
         * if it is trying to fence
         */
        if (qp->req.wait_fence) {
                qp->req.wait_fence = 0;
                rxe_run_task(&qp->req.task, 1);
        }
}

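/* Complete a WQE that has been acked: return its RD/atomic credit, handle
 * the SQ drain transition and generate the completion.
 */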
static inline enum comp_state complete_ack(struct rxe_qp *qp,
                                           struct rxe_pkt_info *pkt,
                                           struct rxe_send_wqe *wqe)
{
        unsigned long flags;

        if (wqe->has_rd_atomic) {
                wqe->has_rd_atomic = 0;
                atomic_inc(&qp->req.rd_atomic);
                if (qp->req.need_rd_atomic) {
                        qp->comp.timeout_retry = 0;
                        qp->req.need_rd_atomic = 0;
                        rxe_run_task(&qp->req.task, 1);
                }
        }

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* state_lock used by requester & completer */
                spin_lock_irqsave(&qp->state_lock, flags);
                if ((qp->req.state == QP_STATE_DRAIN) &&
                    (qp->comp.psn == qp->req.psn)) {
                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_irqrestore(&qp->state_lock, flags);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                                       qp->ibqp.qp_context);
                        }
                } else {
                        spin_unlock_irqrestore(&qp->state_lock, flags);
                }
        }

        do_complete(qp, wqe);

        if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
                return COMPST_UPDATE_COMP;
        else
                return COMPST_DONE;
}

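/* Complete a WQE that does not need an ack (or whose ack already arrived),
 * advancing the completer PSN past it if it is still pending.
 */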
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
                                           struct rxe_pkt_info *pkt,
                                           struct rxe_send_wqe *wqe)
{
        if (pkt && wqe->state == wqe_state_pending) {
                if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
                        qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
                        qp->comp.opcode = -1;
                }

                if (qp->req.wait_psn) {
                        qp->req.wait_psn = 0;
                        rxe_run_task(&qp->req.task, 1);
                }
        }

        do_complete(qp, wqe);

        return COMPST_GET_WQE;
}

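/* Completer task: drains the response packet queue and walks the
 * comp_state machine for each packet (and for timeouts), completing send
 * WQEs and driving retries. Returns 0 to be called again, -EAGAIN to stop.
 */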
int rxe_completer(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_send_wqe *wqe = NULL;
        struct sk_buff *skb = NULL;
        struct rxe_pkt_info *pkt = NULL;
        enum comp_state state;

        if (!qp->valid) {
                while ((skb = skb_dequeue(&qp->resp_pkts))) {
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                }
                skb = NULL;
                pkt = NULL;

                while (queue_head(qp->sq.queue))
                        advance_consumer(qp->sq.queue);

                goto exit;
        }

        if (qp->req.state == QP_STATE_ERROR) {
                while ((skb = skb_dequeue(&qp->resp_pkts))) {
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                }
                skb = NULL;
                pkt = NULL;

                while ((wqe = queue_head(qp->sq.queue))) {
                        wqe->status = IB_WC_WR_FLUSH_ERR;
                        do_complete(qp, wqe);
                }

                goto exit;
        }

        if (qp->req.state == QP_STATE_RESET) {
                while ((skb = skb_dequeue(&qp->resp_pkts))) {
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                }
                skb = NULL;
                pkt = NULL;

                while (queue_head(qp->sq.queue))
                        advance_consumer(qp->sq.queue);

                goto exit;
        }

        if (qp->comp.timeout) {
                qp->comp.timeout_retry = 1;
                qp->comp.timeout = 0;
        } else {
                qp->comp.timeout_retry = 0;
        }

        if (qp->req.need_retry)
                goto exit;

        state = COMPST_GET_ACK;

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         comp_state_name[state]);
                switch (state) {
                case COMPST_GET_ACK:
                        skb = skb_dequeue(&qp->resp_pkts);
                        if (skb) {
                                pkt = SKB_TO_PKT(skb);
                                qp->comp.timeout_retry = 0;
                        }
                        state = COMPST_GET_WQE;
                        break;

                case COMPST_GET_WQE:
                        state = get_wqe(qp, pkt, &wqe);
                        break;

                case COMPST_CHECK_PSN:
                        state = check_psn(qp, pkt, wqe);
                        break;

                case COMPST_CHECK_ACK:
                        state = check_ack(qp, pkt, wqe);
                        break;

                case COMPST_READ:
                        state = do_read(qp, pkt, wqe);
                        break;

                case COMPST_ATOMIC:
                        state = do_atomic(qp, pkt, wqe);
                        break;

                case COMPST_WRITE_SEND:
                        if (wqe->state == wqe_state_pending &&
                            wqe->last_psn == pkt->psn)
                                state = COMPST_COMP_ACK;
                        else
                                state = COMPST_UPDATE_COMP;
                        break;

                case COMPST_COMP_ACK:
                        state = complete_ack(qp, pkt, wqe);
                        break;

                case COMPST_COMP_WQE:
                        state = complete_wqe(qp, pkt, wqe);
                        break;

                case COMPST_UPDATE_COMP:
                        if (pkt->mask & RXE_END_MASK)
                                qp->comp.opcode = -1;
                        else
                                qp->comp.opcode = pkt->opcode;

                        if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
                                qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

                        if (qp->req.wait_psn) {
                                qp->req.wait_psn = 0;
                                rxe_run_task(&qp->req.task, 1);
                        }

                        state = COMPST_DONE;
                        break;

                case COMPST_DONE:
                        if (pkt) {
                                rxe_drop_ref(pkt->qp);
                                kfree_skb(skb);
                        }
                        goto done;

                case COMPST_EXIT:
                        if (qp->comp.timeout_retry && wqe) {
                                state = COMPST_ERROR_RETRY;
                                break;
                        }

                        /* re-arm the retransmit timer if
                         * (1) QP is type RC
                         * (2) the QP is alive
                         * (3) there is a packet sent by the requester that
                         *     might be acked (we still might get spurious
                         *     timeouts but try to keep them as few as possible)
                         * (4) the timeout parameter is set
                         */
                        if ((qp_type(qp) == IB_QPT_RC) &&
                            (qp->req.state == QP_STATE_READY) &&
                            (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
                            qp->qp_timeout_jiffies)
                                mod_timer(&qp->retrans_timer,
                                          jiffies + qp->qp_timeout_jiffies);
                        goto exit;

                case COMPST_ERROR_RETRY:
                        /* we come here if the retry timer fired and we did
                         * not receive a response packet. try to retry the send
                         * queue if that makes sense and the limits have not
                         * been exceeded. remember that some timeouts are
                         * spurious since we do not reset the timer but kick
                         * it down the road or let it expire
                         */

                        /* there is nothing to retry in this case */
                        if (!wqe || (wqe->state == wqe_state_posted))
                                goto exit;

                        if (qp->comp.retry_cnt > 0) {
                                if (qp->comp.retry_cnt != 7)
                                        qp->comp.retry_cnt--;

                                /* no point in retrying if we have already
                                 * seen the last ack that the requester could
                                 * have caused
                                 */
                                if (psn_compare(qp->req.psn,
                                                qp->comp.psn) > 0) {
                                        /* tell the requester to retry the
                                         * send queue next time around
                                         */
                                        qp->req.need_retry = 1;
                                        rxe_run_task(&qp->req.task, 1);
                                }

                                if (pkt) {
                                        rxe_drop_ref(pkt->qp);
                                        kfree_skb(skb);
                                }

                                goto exit;

                        } else {
                                wqe->status = IB_WC_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
                        }
                        break;

                case COMPST_RNR_RETRY:
                        if (qp->comp.rnr_retry > 0) {
                                if (qp->comp.rnr_retry != 7)
                                        qp->comp.rnr_retry--;

                                qp->req.need_retry = 1;
                                pr_debug("qp#%d set rnr nak timer\n",
                                         qp_num(qp));
                                mod_timer(&qp->rnr_nak_timer,
                                          jiffies + rnrnak_jiffies(aeth_syn(pkt)
                                                & ~AETH_TYPE_MASK));
                                goto exit;
                        } else {
                                wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
                        }
                        break;

                case COMPST_ERROR:
                        do_complete(qp, wqe);
                        rxe_qp_error(qp);

                        if (pkt) {
                                rxe_drop_ref(pkt->qp);
                                kfree_skb(skb);
                        }

                        goto exit;
                }
        }


exit:
        /* we come here if we are done with processing and want the task to
         * exit from the loop calling us
         */
        return -EAGAIN;

done:
        /* we come here if we have processed a packet and want the task to
         * call us again to see if there is anything else to do
         */
        return 0;
}