/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

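/*
 * The responder runs as a state machine: rxe_responder() repeatedly
 * dispatches on one of the states below, and each handler returns the
 * next state, until the chain parks in RESPST_DONE or RESPST_EXIT.
 */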
enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

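	/* Defer to the tasklet when the request is an RDMA read (which can
	 * generate a long stream of response packets) or when there is
	 * already a backlog; otherwise the task may run inline.
	 */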
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
		     (skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
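	/* diff > 0: the packet is ahead of the expected PSN (a gap),
	 * diff < 0: it precedes it (a duplicate), diff == 0: in sequence
	 */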
	int diff = psn_compare(pkt->psn, qp->resp.psn);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

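/* Check that the opcode is a legal continuation of the message in progress:
 * a FIRST/MIDDLE fragment must be followed by a MIDDLE or LAST of the same
 * operation, and a MIDDLE/LAST must not arrive without a FIRST.
 */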
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

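	/* The SRQ limit event is one-shot: crossing the armed limit fires
	 * IB_EVENT_SRQ_LIMIT_REACHED once and clears the limit until the
	 * consumer re-arms it.
	 */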
	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

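/* Per-QP-type length validation is currently folded into the rkey check;
 * every branch below simply advances to RESPST_CHK_RKEY.
 */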
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err2;
	}

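	/* For writes, every packet but the last must carry a full MTU with
	 * no pad, and the last packet's pad must round the residual length
	 * up to a multiple of four bytes.
	 */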
	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}

			resid = mtu;
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
		}
	}

	WARN_ON(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err2:
	rxe_drop_ref(mem);
err1:
	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check that vaddr is 8-byte aligned */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
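	/* BTH pad rounds the payload up to a 4-byte boundary; the ICRC
	 * occupies another four bytes at the end of the packet.
	 */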
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;

		res->read.va = qp->resp.va;
		res->read.va_org = qp->resp.va;

		res->first_psn = req_pkt->psn;
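		/* one PSN per response packet:
		 * last_psn = first_psn + ceil(len / mtu) - 1
		 */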
		res->last_psn = req_pkt->psn +
				(reth_len(req_pkt) + mtu - 1) /
				mtu - 1;
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		kfree_skb(skb);
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		qp->resp.opcode = -1;
		qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;
			struct sk_buff *skb = PKT_TO_SKB(pkt);

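			/* UD consumers expect a GRH (or its IPv4 analogue)
			 * at the start of the receive buffer, so synthesize
			 * one from the packet's IP header.
			 */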
			memset(&hdr, 0, sizeof(hdr));
			if (skb->protocol == htons(ETH_P_IP))
				memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
			else if (skb->protocol == htons(ETH_P_IPV6))
				memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON(1);
	}

	/* We successfully processed this new request. */
	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	wc->wr_id = wqe->wr_id;
	wc->status = qp->resp.status;
	wc->qp = &qp->ibqp;

	/* fields after status are not required for errors */
	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
			      pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data =
					(__u32 __force)immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
			}

			wc->qp = &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err_ratelimited("Failed sending ack\n");
		kfree_skb(skb);
	}

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy) {
		rxe_add_ref(qp); /* for the new SKB */
	} else {
		pr_warn("Could not clone atomic response\n");
		kfree_skb(skb); /* not yet owned by a resource; avoid a leak */
		rc = -ENOMEM;
		goto out;
	}

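	/* Stash the ack in a responder resource so a duplicate atomic
	 * request can be answered by re-transmitting the saved skb (see
	 * duplicate_request()).
	 */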
	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
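		/* A duplicate read may request only a subset of the original
		 * range; the reply is replayed starting at the requested PSN.
		 */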
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp); /* for the new SKB */
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}

			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				kfree_skb(skb_copy);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Either the resource was not found (class D error) or the
		 * cached result was resent above; drop the request either way.
		 */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

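	/* run the state machine until a terminal state hands control back:
	 * DONE returns success, EXIT/RESET/ERROR return -EAGAIN
	 */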
	while (1) {
		pr_debug("state = %s\n", resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					  qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET: {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&qp->req_pkts))) {
				rxe_drop_ref(qp);
				kfree_skb(skb);
			}

			while (!qp->srq && qp->rq.queue &&
			       queue_head(qp->rq.queue))
				advance_consumer(qp->rq.queue);

			qp->resp.wqe = NULL;
			goto exit;
		}

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	return ret;
}