1/*******************************************************************************
2*
3* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4*
5* This software is available to you under a choice of one of two
6* licenses. You may choose to be licensed under the terms of the GNU
7* General Public License (GPL) Version 2, available from the file
8* COPYING in the main directory of this source tree, or the
9* OpenFabrics.org BSD license below:
10*
11* Redistribution and use in source and binary forms, with or
12* without modification, are permitted provided that the following
13* conditions are met:
14*
15* - Redistributions of source code must retain the above
16* copyright notice, this list of conditions and the following
17* disclaimer.
18*
19* - Redistributions in binary form must reproduce the above
20* copyright notice, this list of conditions and the following
21* disclaimer in the documentation and/or other materials
22* provided with the distribution.
23*
24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31* SOFTWARE.
32*
33*******************************************************************************/
34
35#include "i40iw_osdep.h"
36#include "i40iw_register.h"
37#include "i40iw_status.h"
38#include "i40iw_hmc.h"
39
40#include "i40iw_d.h"
41#include "i40iw_type.h"
42#include "i40iw_p.h"
43#include "i40iw_puda.h"
44
45static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
46 struct i40iw_puda_buf *buf);
47static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
48static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
49static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
50 *rsrc, bool initial);
51/**
52 * i40iw_puda_get_listbuf - get buffer from puda list
53 * @list: list to use for buffers (ILQ or IEQ)
54 */
55static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
56{
57 struct i40iw_puda_buf *buf = NULL;
58
59 if (!list_empty(list)) {
60 buf = (struct i40iw_puda_buf *)list->next;
61 list_del((struct list_head *)&buf->list);
62 }
63 return buf;
64}
65
66/**
67 * i40iw_puda_get_bufpool - return buffer from resource
68 * @rsrc: resource to use for buffer
69 */
70struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
71{
72 struct i40iw_puda_buf *buf = NULL;
73 struct list_head *list = &rsrc->bufpool;
74 unsigned long flags;
75
76 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
77 buf = i40iw_puda_get_listbuf(list);
78 if (buf)
79 rsrc->avail_buf_count--;
80 else
81 rsrc->stats_buf_alloc_fail++;
82 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
83 return buf;
84}
85
86/**
87 * i40iw_puda_ret_bufpool - return buffer to rsrc list
88 * @rsrc: resource to use for buffer
 89 * @buf: buffer to return to resource
90 */
91void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
92 struct i40iw_puda_buf *buf)
93{
94 unsigned long flags;
95
96 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
97 list_add(&buf->list, &rsrc->bufpool);
98 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
99 rsrc->avail_buf_count++;
100}
101
102/**
103 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
104 * @rsrc: resource ptr
105 * @wqe_idx: wqe index to use
106 * @buf: puda buffer for rcv q
107 * @initial: flag if during init time
108 */
109static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
110 struct i40iw_puda_buf *buf, bool initial)
111{
112 u64 *wqe;
113 struct i40iw_sc_qp *qp = &rsrc->qp;
114 u64 offset24 = 0;
115
116 qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
117 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
118 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
119 "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
120 wqe_idx, buf, wqe);
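	/* set the valid bit on the initial post; on re-posts toggle the bit read back from the WQE so it tracks RQ polarity */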
121 if (!initial)
122 get_64bit_val(wqe, 24, &offset24);
123
124 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
125 set_64bit_val(wqe, 24, offset24);
126
127 set_64bit_val(wqe, 0, buf->mem.pa);
128 set_64bit_val(wqe, 8,
129 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
130 set_64bit_val(wqe, 24, offset24);
131}
132
133/**
134 * i40iw_puda_replenish_rq - post rcv buffers
135 * @rsrc: resource to use for buffer
136 * @initial: flag if during init time
137 */
138static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
139 bool initial)
140{
141 u32 i;
142 u32 invalid_cnt = rsrc->rxq_invalid_cnt;
143 struct i40iw_puda_buf *buf = NULL;
144
145 for (i = 0; i < invalid_cnt; i++) {
146 buf = i40iw_puda_get_bufpool(rsrc);
147 if (!buf)
148 return I40IW_ERR_list_empty;
149 i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
150 initial);
151 rsrc->rx_wqe_idx =
152 ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
153 rsrc->rxq_invalid_cnt--;
154 }
155 return 0;
156}
157
158/**
159 * i40iw_puda_alloc_buf - allocate mem for buffer
160 * @dev: iwarp device
161 * @length: length of buffer
162 */
163static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
164 u32 length)
165{
166 struct i40iw_puda_buf *buf = NULL;
167 struct i40iw_virt_mem buf_mem;
168 enum i40iw_status_code ret;
169
170 ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
171 sizeof(struct i40iw_puda_buf));
172 if (ret) {
173 i40iw_debug(dev, I40IW_DEBUG_PUDA,
174 "%s: error mem for buf\n", __func__);
175 return NULL;
176 }
177 buf = (struct i40iw_puda_buf *)buf_mem.va;
178 ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
179 if (ret) {
180 i40iw_debug(dev, I40IW_DEBUG_PUDA,
181 "%s: error dma mem for buf\n", __func__);
182 i40iw_free_virt_mem(dev->hw, &buf_mem);
183 return NULL;
184 }
185 buf->buf_mem.va = buf_mem.va;
186 buf->buf_mem.size = buf_mem.size;
187 return buf;
188}
189
190/**
191 * i40iw_puda_dele_buf - delete buffer back to system
192 * @dev: iwarp device
193 * @buf: buffer to free
194 */
195static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
196 struct i40iw_puda_buf *buf)
197{
198 i40iw_free_dma_mem(dev->hw, &buf->mem);
199 i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
200}
201
202/**
203 * i40iw_puda_get_next_send_wqe - return next wqe for processing
204 * @qp: puda qp for wqe
205 * @wqe_idx: wqe index for caller
206 */
207static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
208{
209 u64 *wqe = NULL;
210 enum i40iw_status_code ret_code = 0;
211
212 *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
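	/* head wrapping back to index 0 means the SQ ring has gone all the way around; flip the polarity used to mark new WQEs valid */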
213 if (!*wqe_idx)
214 qp->swqe_polarity = !qp->swqe_polarity;
215 I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
216 if (ret_code)
217 return wqe;
218 wqe = qp->sq_base[*wqe_idx].elem;
219
220 return wqe;
221}
222
223/**
224 * i40iw_puda_poll_info - poll cq for completion
225 * @cq: cq for poll
226 * @info: info return for successful completion
227 */
228static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
229 struct i40iw_puda_completion_info *info)
230{
231 u64 qword0, qword2, qword3;
232 u64 *cqe;
233 u64 comp_ctx;
234 bool valid_bit;
235 u32 major_err, minor_err;
236 bool error;
237
238 cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
239 get_64bit_val(cqe, 24, &qword3);
240 valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
241
242 if (valid_bit != cq->cq_uk.polarity)
243 return I40IW_ERR_QUEUE_EMPTY;
244
245 i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
246 error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
247 if (error) {
248 i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
249 major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
250 minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
251 info->compl_error = major_err << 16 | minor_err;
252 return I40IW_ERR_CQ_COMPL_ERROR;
253 }
254
255 get_64bit_val(cqe, 0, &qword0);
256 get_64bit_val(cqe, 16, &qword2);
257
258 info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
259 info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
260
261 get_64bit_val(cqe, 8, &comp_ctx);
262 info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
263 info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
264
265 if (info->q_type == I40IW_CQE_QTYPE_RQ) {
266 info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
267 info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
268 info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
269 info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
270 }
271
272 return 0;
273}
274
275/**
276 * i40iw_puda_poll_completion - processes completion for cq
277 * @dev: iwarp device
278 * @cq: cq getting interrupt
279 * @compl_err: return any completion err
280 */
281enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
282 struct i40iw_sc_cq *cq, u32 *compl_err)
283{
284 struct i40iw_qp_uk *qp;
285 struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
286 struct i40iw_puda_completion_info info;
287 enum i40iw_status_code ret = 0;
288 struct i40iw_puda_buf *buf;
289 struct i40iw_puda_rsrc *rsrc;
290 void *sqwrid;
291 u8 cq_type = cq->cq_type;
292 unsigned long flags;
293
294 if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
295 rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
296 } else {
297 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
298 return I40IW_ERR_BAD_PTR;
299 }
300 memset(&info, 0, sizeof(info));
301 ret = i40iw_puda_poll_info(cq, &info);
302 *compl_err = info.compl_error;
303 if (ret == I40IW_ERR_QUEUE_EMPTY)
304 return ret;
305 if (ret)
306 goto done;
307
308 qp = info.qp;
309 if (!qp || !rsrc) {
310 ret = I40IW_ERR_BAD_PTR;
311 goto done;
312 }
313
314 if (qp->qp_id != rsrc->qp_id) {
315 ret = I40IW_ERR_BAD_PTR;
316 goto done;
317 }
318
319 if (info.q_type == I40IW_CQE_QTYPE_RQ) {
320 buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
321 /* Get all the tcpip information in the buf header */
322 ret = i40iw_puda_get_tcpip_info(&info, buf);
323 if (ret) {
324 rsrc->stats_rcvd_pkt_err++;
325 if (cq_type == I40IW_CQ_TYPE_ILQ) {
326 i40iw_ilq_putback_rcvbuf(&rsrc->qp,
327 info.wqe_idx);
328 } else {
329 i40iw_puda_ret_bufpool(rsrc, buf);
330 i40iw_puda_replenish_rq(rsrc, false);
331 }
332 goto done;
333 }
334
335 rsrc->stats_pkt_rcvd++;
336 rsrc->compl_rxwqe_idx = info.wqe_idx;
337 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
338 rsrc->receive(rsrc->dev, buf);
339 if (cq_type == I40IW_CQ_TYPE_ILQ)
340 i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
341 else
342 i40iw_puda_replenish_rq(rsrc, false);
343
344 } else {
345 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
346 sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
347 I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
348 rsrc->xmit_complete(rsrc->dev, sqwrid);
349 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
350 rsrc->tx_wqe_avail_cnt++;
351 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
352 if (!list_empty(&dev->ilq->txpend))
353 i40iw_puda_send_buf(dev->ilq, NULL);
354 }
355
356done:
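	/* consume the CQE: advance the CQ ring head, flip polarity on wrap and publish the new position to the CQ shadow area */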
357 I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
358 if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
359 cq_uk->polarity = !cq_uk->polarity;
360 /* update cq tail in cq shadow memory also */
361 I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
362 set_64bit_val(cq_uk->shadow_area, 0,
363 I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
364 return 0;
365}
366
367/**
368 * i40iw_puda_send - complete send wqe for transmit
369 * @qp: puda qp for send
370 * @info: buffer information for transmit
371 */
372enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
373 struct i40iw_puda_send_info *info)
374{
375 u64 *wqe;
376 u32 iplen, l4len;
377 u64 header[2];
378 u32 wqe_idx;
379 u8 iipt;
380
 381 /* TCP header length in 32-bit words */
382 l4len = info->tcplen >> 2;
383 if (info->ipv4) {
384 iipt = 3;
385 iplen = 5;
386 } else {
387 iipt = 1;
388 iplen = 10;
389 }
390
391 wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
392 if (!wqe)
393 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
394 qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
395 /* Third line of WQE descriptor */
396 /* maclen is in words */
397 header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
398 LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
399 LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
400 LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
 401 /* Fourth line of WQE descriptor */
402 header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
403 LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
404 LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
405 LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
406
407 set_64bit_val(wqe, 0, info->paddr);
408 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
409 set_64bit_val(wqe, 16, header[0]);
410 set_64bit_val(wqe, 24, header[1]);
411
412 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
413 i40iw_qp_post_wr(&qp->qp_uk);
414 return 0;
415}
416
417/**
418 * i40iw_puda_send_buf - transmit puda buffer
419 * @rsrc: resource to use for buffer
420 * @buf: puda buffer to transmit
421 */
422void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
423{
424 struct i40iw_puda_send_info info;
425 enum i40iw_status_code ret = 0;
426 unsigned long flags;
427
428 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
429 /* if no wqe available or not from a completion and we have
430 * pending buffers, we must queue new buffer
431 */
432 if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
433 list_add_tail(&buf->list, &rsrc->txpend);
434 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
435 rsrc->stats_sent_pkt_q++;
436 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
437 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
438 "%s: adding to txpend\n", __func__);
439 return;
440 }
441 rsrc->tx_wqe_avail_cnt--;
442 /* if we are coming from a completion and have pending buffers
 443 * then get one from the pending list
444 */
445 if (!buf) {
446 buf = i40iw_puda_get_listbuf(&rsrc->txpend);
447 if (!buf)
448 goto done;
449 }
450
451 info.scratch = (void *)buf;
452 info.paddr = buf->mem.pa;
453 info.len = buf->totallen;
454 info.tcplen = buf->tcphlen;
455 info.maclen = buf->maclen;
456 info.ipv4 = buf->ipv4;
457 info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
458
459 ret = i40iw_puda_send(&rsrc->qp, &info);
460 if (ret) {
461 rsrc->tx_wqe_avail_cnt++;
462 rsrc->stats_sent_pkt_q++;
463 list_add(&buf->list, &rsrc->txpend);
464 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
465 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
466 "%s: adding to puda_send\n", __func__);
467 } else {
468 rsrc->stats_pkt_sent++;
469 }
470done:
471 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
472}
473
474/**
475 * i40iw_puda_qp_setctx - during init, set qp's context
476 * @rsrc: qp's resource
477 */
478static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
479{
480 struct i40iw_sc_qp *qp = &rsrc->qp;
481 u64 *qp_ctx = qp->hw_host_ctx;
482
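	/* fill the hardware QP context: SQ/RQ base addresses and sizes, send MSS, tx/rx completion queue numbers, TPH values and QS handle */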
483 set_64bit_val(qp_ctx, 8, qp->sq_pa);
484 set_64bit_val(qp_ctx, 16, qp->rq_pa);
485
486 set_64bit_val(qp_ctx, 24,
487 LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
488 LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
489
490 set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
491 set_64bit_val(qp_ctx, 56, 0);
492 set_64bit_val(qp_ctx, 64, 1);
493
494 set_64bit_val(qp_ctx, 136,
495 LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
496 LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
497
498 set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
499
500 set_64bit_val(qp_ctx, 168,
501 LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
502
503 set_64bit_val(qp_ctx, 176,
504 LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
505 LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
506 LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
507
508 i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
509 qp_ctx, I40IW_QP_CTX_SIZE);
510}
511
512/**
513 * i40iw_puda_qp_wqe - setup wqe for qp create
514 * @rsrc: resource for qp
515 */
516static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
517{
518 struct i40iw_sc_qp *qp = &rsrc->qp;
519 struct i40iw_sc_dev *dev = rsrc->dev;
520 struct i40iw_sc_cqp *cqp;
521 u64 *wqe;
522 u64 header;
523 struct i40iw_ccq_cqe_info compl_info;
524 enum i40iw_status_code status = 0;
525
526 cqp = dev->cqp;
527 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
528 if (!wqe)
529 return I40IW_ERR_RING_FULL;
530
531 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
532 set_64bit_val(wqe, 40, qp->shadow_area_pa);
533 header = qp->qp_uk.qp_id |
534 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
535 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
536 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
537 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
538 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
539
540 set_64bit_val(wqe, 24, header);
541
542 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
543 i40iw_sc_cqp_post_sq(cqp);
544 status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
545 I40IW_CQP_OP_CREATE_QP,
546 &compl_info);
547 return status;
548}
549
550/**
551 * i40iw_puda_qp_create - create qp for resource
552 * @rsrc: resource to use for buffer
553 */
554static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
555{
556 struct i40iw_sc_qp *qp = &rsrc->qp;
557 struct i40iw_qp_uk *ukqp = &qp->qp_uk;
558 enum i40iw_status_code ret = 0;
559 u32 sq_size, rq_size, t_size;
560 struct i40iw_dma_mem *mem;
561
562 sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
563 rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
564 t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
565 I40IW_QP_CTX_SIZE);
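	/* one DMA region laid out as: SQ, RQ, shadow area (sized in 64-bit words, hence the << 3), then the hardware QP context */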
566 /* Get page aligned memory */
567 ret =
568 i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
569 I40IW_HW_PAGE_SIZE);
570 if (ret) {
571 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
572 return ret;
573 }
574
575 mem = &rsrc->qpmem;
576 memset(mem->va, 0, t_size);
577 qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
578 qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
579 qp->pd = &rsrc->sc_pd;
580 qp->qp_type = I40IW_QP_TYPE_UDA;
581 qp->dev = rsrc->dev;
582 qp->back_qp = (void *)rsrc;
583 qp->sq_pa = mem->pa;
584 qp->rq_pa = qp->sq_pa + sq_size;
585 ukqp->sq_base = mem->va;
586 ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
587 ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
588 qp->shadow_area_pa = qp->rq_pa + rq_size;
589 qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
590 qp->hw_host_ctx_pa =
591 qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
592 ukqp->qp_id = rsrc->qp_id;
593 ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
594 ukqp->rq_wrid_array = rsrc->rq_wrid_array;
595
596 ukqp->qp_id = rsrc->qp_id;
597 ukqp->sq_size = rsrc->sq_size;
598 ukqp->rq_size = rsrc->rq_size;
599
600 I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
601 I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
602 I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
603
604 if (qp->pd->dev->is_pf)
605 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
606 I40E_PFPE_WQEALLOC);
607 else
608 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
609 I40E_VFPE_WQEALLOC1);
610
611 qp->user_pri = 0;
612 i40iw_qp_add_qos(rsrc->dev, qp);
613 i40iw_puda_qp_setctx(rsrc);
614 ret = i40iw_puda_qp_wqe(rsrc);
615 if (ret)
616 i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
617 return ret;
618}
619
620/**
621 * i40iw_puda_cq_create - create cq for resource
622 * @rsrc: resource for which cq to create
623 */
624static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
625{
626 struct i40iw_sc_dev *dev = rsrc->dev;
627 struct i40iw_sc_cq *cq = &rsrc->cq;
628 u64 *wqe;
629 struct i40iw_sc_cqp *cqp;
630 u64 header;
631 enum i40iw_status_code ret = 0;
632 u32 tsize, cqsize;
633 u32 shadow_read_threshold = 128;
634 struct i40iw_dma_mem *mem;
635 struct i40iw_ccq_cqe_info compl_info;
636 struct i40iw_cq_init_info info;
637 struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
638
639 cq->back_cq = (void *)rsrc;
640 cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
641 tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
642 ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
643 I40IW_CQ0_ALIGNMENT_MASK);
644 if (ret)
645 return ret;
646
647 mem = &rsrc->cqmem;
648 memset(&info, 0, sizeof(info));
649 info.dev = dev;
650 info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
651 I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
652 info.shadow_read_threshold = rsrc->cq_size >> 2;
653 info.ceq_id_valid = true;
654 info.cq_base_pa = mem->pa;
655 info.shadow_area_pa = mem->pa + cqsize;
656 init_info->cq_base = mem->va;
657 init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
658 init_info->cq_size = rsrc->cq_size;
659 init_info->cq_id = rsrc->cq_id;
660 ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
661 if (ret)
662 goto error;
663 cqp = dev->cqp;
664 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
665 if (!wqe) {
666 ret = I40IW_ERR_RING_FULL;
667 goto error;
668 }
669
670 set_64bit_val(wqe, 0, rsrc->cq_size);
671 set_64bit_val(wqe, 8, RS_64_1(cq, 1));
672 set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
673 set_64bit_val(wqe, 32, cq->cq_pa);
674
675 set_64bit_val(wqe, 40, cq->shadow_area_pa);
676
677 header = rsrc->cq_id |
678 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
679 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
680 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
681 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
682 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
683 set_64bit_val(wqe, 24, header);
684
685 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
686 wqe, I40IW_CQP_WQE_SIZE * 8);
687
688 i40iw_sc_cqp_post_sq(dev->cqp);
689 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
690 I40IW_CQP_OP_CREATE_CQ,
691 &compl_info);
692
693error:
694 if (ret)
695 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
696 return ret;
697}
698
699/**
700 * i40iw_puda_dele_resources - delete all resources during close
701 * @dev: iwarp device
 702 * @type: type of resource to delete
 703 * @reset: true if the chip is being reset
704 */
705void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
706 enum puda_resource_type type,
707 bool reset)
708{
709 struct i40iw_ccq_cqe_info compl_info;
710 struct i40iw_puda_rsrc *rsrc;
711 struct i40iw_puda_buf *buf = NULL;
712 struct i40iw_puda_buf *nextbuf = NULL;
713 struct i40iw_virt_mem *vmem;
714 enum i40iw_status_code ret;
715
716 switch (type) {
717 case I40IW_PUDA_RSRC_TYPE_ILQ:
718 rsrc = dev->ilq;
719 vmem = &dev->ilq_mem;
720 break;
721 case I40IW_PUDA_RSRC_TYPE_IEQ:
722 rsrc = dev->ieq;
723 vmem = &dev->ieq_mem;
724 break;
725 default:
726 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
727 __func__, type);
728 return;
729 }
730
731 switch (rsrc->completion) {
732 case PUDA_HASH_CRC_COMPLETE:
733 i40iw_free_hash_desc(rsrc->hash_desc);
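	/* fallthrough */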
734 case PUDA_QP_CREATED:
735 do {
736 if (reset)
737 break;
738 ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
739 0, false, true, true);
740 if (ret)
741 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
742 "%s error ieq qp destroy\n",
743 __func__);
744
745 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
746 I40IW_CQP_OP_DESTROY_QP,
747 &compl_info);
748 if (ret)
749 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
750 "%s error ieq qp destroy done\n",
751 __func__);
752 } while (0);
753
754 i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
755 /* fallthrough */
756 case PUDA_CQ_CREATED:
757 do {
758 if (reset)
759 break;
760 ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
761 if (ret)
762 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
763 "%s error ieq cq destroy\n",
764 __func__);
765
766 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
767 I40IW_CQP_OP_DESTROY_CQ,
768 &compl_info);
769 if (ret)
770 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
771 "%s error ieq qp destroy done\n",
772 __func__);
773 } while (0);
774
775 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
776 break;
777 default:
778 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
779 break;
780 }
781 /* Free all allocated puda buffers for both tx and rx */
782 buf = rsrc->alloclist;
783 while (buf) {
784 nextbuf = buf->next;
785 i40iw_puda_dele_buf(dev, buf);
786 buf = nextbuf;
787 rsrc->alloc_buf_count--;
788 }
789 i40iw_free_virt_mem(dev->hw, vmem);
790}
791
792/**
793 * i40iw_puda_allocbufs - allocate buffers for resource
794 * @rsrc: resource for buffer allocation
795 * @count: number of buffers to create
796 */
797static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
798 u32 count)
799{
800 u32 i;
801 struct i40iw_puda_buf *buf;
802 struct i40iw_puda_buf *nextbuf;
803
804 for (i = 0; i < count; i++) {
805 buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
806 if (!buf) {
807 rsrc->stats_buf_alloc_fail++;
808 return I40IW_ERR_NO_MEMORY;
809 }
810 i40iw_puda_ret_bufpool(rsrc, buf);
811 rsrc->alloc_buf_count++;
812 if (!rsrc->alloclist) {
813 rsrc->alloclist = buf;
814 } else {
815 nextbuf = rsrc->alloclist;
816 rsrc->alloclist = buf;
817 buf->next = nextbuf;
818 }
819 }
820 rsrc->avail_buf_count = rsrc->alloc_buf_count;
821 return 0;
822}
823
824/**
 825 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
826 * @dev: iwarp device
827 * @info: resource information
828 */
829enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
830 struct i40iw_puda_rsrc_info *info)
831{
832 enum i40iw_status_code ret = 0;
833 struct i40iw_puda_rsrc *rsrc;
834 u32 pudasize;
835 u32 sqwridsize, rqwridsize;
836 struct i40iw_virt_mem *vmem;
837
838 info->count = 1;
839 pudasize = sizeof(struct i40iw_puda_rsrc);
840 sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
841 rqwridsize = info->rq_size * 8;
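	/* one virtual allocation holds the rsrc structure followed by the SQ wr tracking array and the RQ wrid array */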
842 switch (info->type) {
843 case I40IW_PUDA_RSRC_TYPE_ILQ:
844 vmem = &dev->ilq_mem;
845 break;
846 case I40IW_PUDA_RSRC_TYPE_IEQ:
847 vmem = &dev->ieq_mem;
848 break;
849 default:
850 return I40IW_NOT_SUPPORTED;
851 }
852 ret =
853 i40iw_allocate_virt_mem(dev->hw, vmem,
854 pudasize + sqwridsize + rqwridsize);
855 if (ret)
856 return ret;
857 rsrc = (struct i40iw_puda_rsrc *)vmem->va;
858 spin_lock_init(&rsrc->bufpool_lock);
859 if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
860 dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
861 dev->ilq_count = info->count;
862 rsrc->receive = info->receive;
863 rsrc->xmit_complete = info->xmit_complete;
864 } else {
865 vmem = &dev->ieq_mem;
866 dev->ieq_count = info->count;
867 dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
868 rsrc->receive = i40iw_ieq_receive;
869 rsrc->xmit_complete = i40iw_ieq_tx_compl;
870 }
871
872 rsrc->type = info->type;
873 rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
874 rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
875 rsrc->mss = info->mss;
876 /* Initialize all ieq lists */
877 INIT_LIST_HEAD(&rsrc->bufpool);
878 INIT_LIST_HEAD(&rsrc->txpend);
879
880 rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
881 dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
882 rsrc->qp_id = info->qp_id;
883 rsrc->cq_id = info->cq_id;
884 rsrc->sq_size = info->sq_size;
885 rsrc->rq_size = info->rq_size;
886 rsrc->cq_size = info->rq_size + info->sq_size;
887 rsrc->buf_size = info->buf_size;
888 rsrc->dev = dev;
889
890 ret = i40iw_puda_cq_create(rsrc);
891 if (!ret) {
892 rsrc->completion = PUDA_CQ_CREATED;
893 ret = i40iw_puda_qp_create(rsrc);
894 }
895 if (ret) {
896 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
897 goto error;
898 }
899 rsrc->completion = PUDA_QP_CREATED;
900
901 ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
902 if (ret) {
 903 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
904 goto error;
905 }
906
907 rsrc->rxq_invalid_cnt = info->rq_size;
908 ret = i40iw_puda_replenish_rq(rsrc, true);
909 if (ret)
910 goto error;
911
912 if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
913 if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
914 rsrc->check_crc = true;
915 rsrc->completion = PUDA_HASH_CRC_COMPLETE;
916 ret = 0;
917 }
918 }
919
920 dev->ccq_ops->ccq_arm(&rsrc->cq);
921 return ret;
922 error:
923 i40iw_puda_dele_resources(dev, info->type, false);
924
925 return ret;
926}
927
928/**
929 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
930 * @qp: ilq's qp resource
931 * @wqe_idx: wqe index of completed rcvbuf
932 */
933static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
934{
935 u64 *wqe;
936 u64 offset24;
937
938 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
939 get_64bit_val(wqe, 24, &offset24);
940 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
941 set_64bit_val(wqe, 24, offset24);
942}
943
944/**
 945 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 946 * @length: length of fpdu
947 */
948static u16 i40iw_ieq_get_fpdu_length(u16 length)
949{
950 u16 fpdu_len;
951
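	/* add the MPA framing overhead and round the result up to a 4-byte boundary */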
952 fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
953 fpdu_len = (fpdu_len + 3) & 0xfffffffc;
954 return fpdu_len;
955}
956
957/**
 958 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 959 * @buf: rcv buffer with partial
 960 * @txbuf: tx buffer for sending back
 961 * @buf_offset: rcv buffer offset to copy from
 962 * @txbuf_offset: offset in tx buf to copy to
963 * @length: length of data to copy
964 */
965static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
966 struct i40iw_puda_buf *txbuf,
967 u16 buf_offset, u32 txbuf_offset,
968 u32 length)
969{
970 void *mem1 = (u8 *)buf->mem.va + buf_offset;
971 void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
972
973 memcpy(mem2, mem1, length);
974}
975
976/**
977 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 978 * @buf: receive buffer with partial
979 * @txbuf: buffer to prepare
980 */
981static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
982 struct i40iw_puda_buf *txbuf)
983{
984 txbuf->maclen = buf->maclen;
985 txbuf->tcphlen = buf->tcphlen;
986 txbuf->ipv4 = buf->ipv4;
987 txbuf->hdrlen = buf->hdrlen;
988 i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
989}
990
991/**
992 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
993 * @buf: receive exception buffer
994 * @fps: first partial sequence number
995 */
996static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
997{
998 u32 offset;
999
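	/* trim any bytes that precede the first partial sequence number so the buffer starts exactly at fps */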
1000 if (buf->seqnum < fps) {
1001 offset = fps - buf->seqnum;
1002 if (offset > buf->datalen)
1003 return;
1004 buf->data += offset;
1005 buf->datalen -= (u16)offset;
1006 buf->seqnum = fps;
1007 }
1008}
1009
1010/**
1011 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
1012 * @ieq: ieq resource
1013 * @rxlist: ieq's received buffer list
 1014 * @pbufl: temporary list for buffers for fpdu
1015 * @txbuf: tx buffer for fpdu
1016 * @fpdu_len: total length of fpdu
1017 */
1018static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
1019 struct list_head *rxlist,
1020 struct list_head *pbufl,
1021 struct i40iw_puda_buf *txbuf,
1022 u16 fpdu_len)
1023{
1024 struct i40iw_puda_buf *buf;
1025 u32 nextseqnum;
1026 u16 txoffset, bufoffset;
1027
1028 buf = i40iw_puda_get_listbuf(pbufl);
1029 if (!buf)
1030 return;
1031 nextseqnum = buf->seqnum + fpdu_len;
1032 txbuf->totallen = buf->hdrlen + fpdu_len;
1033 txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1034 i40iw_ieq_setup_tx_buf(buf, txbuf);
1035
1036 txoffset = buf->hdrlen;
1037 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1038
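	/* gather fpdu_len bytes, which may span several buffers on pbufl, into the single tx buffer */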
1039 do {
1040 if (buf->datalen >= fpdu_len) {
1041 /* copied full fpdu */
1042 i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
1043 buf->datalen -= fpdu_len;
1044 buf->data += fpdu_len;
1045 buf->seqnum = nextseqnum;
1046 break;
1047 }
1048 /* copy partial fpdu */
1049 i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
1050 txoffset += buf->datalen;
1051 fpdu_len -= buf->datalen;
1052 i40iw_puda_ret_bufpool(ieq, buf);
1053 buf = i40iw_puda_get_listbuf(pbufl);
1054 if (!buf)
1055 return;
1056 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1057 } while (1);
1058
 1059 /* last buffer on the list */
1060 if (buf->datalen)
1061 list_add(&buf->list, rxlist);
1062 else
1063 i40iw_puda_ret_bufpool(ieq, buf);
1064}
1065
1066/**
 1067 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: partial fpdu tracking structure for the user qp
 1068 * @rxlist: resource list for receive ieq buffers
 1069 * @pbufl: temp. list for buffers for fpdu
1070 * @buf: first receive buffer
1071 * @fpdu_len: total length of fpdu
1072 */
1073static enum i40iw_status_code i40iw_ieq_create_pbufl(
1074 struct i40iw_pfpdu *pfpdu,
1075 struct list_head *rxlist,
1076 struct list_head *pbufl,
1077 struct i40iw_puda_buf *buf,
1078 u16 fpdu_len)
1079{
1080 enum i40iw_status_code status = 0;
1081 struct i40iw_puda_buf *nextbuf;
1082 u32 nextseqnum;
1083 u16 plen = fpdu_len - buf->datalen;
1084 bool done = false;
1085
1086 nextseqnum = buf->seqnum + buf->datalen;
1087 do {
1088 nextbuf = i40iw_puda_get_listbuf(rxlist);
1089 if (!nextbuf) {
1090 status = I40IW_ERR_list_empty;
1091 break;
1092 }
1093 list_add_tail(&nextbuf->list, pbufl);
1094 if (nextbuf->seqnum != nextseqnum) {
1095 pfpdu->bad_seq_num++;
1096 status = I40IW_ERR_SEQ_NUM;
1097 break;
1098 }
1099 if (nextbuf->datalen >= plen) {
1100 done = true;
1101 } else {
1102 plen -= nextbuf->datalen;
1103 nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1104 }
1105
1106 } while (!done);
1107
1108 return status;
1109}
1110
1111/**
1112 * i40iw_ieq_handle_partial - process partial fpdu buffer
1113 * @ieq: ieq resource
1114 * @pfpdu: partial management per user qp
1115 * @buf: receive buffer
1116 * @fpdu_len: fpdu len in the buffer
1117 */
1118static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
1119 struct i40iw_pfpdu *pfpdu,
1120 struct i40iw_puda_buf *buf,
1121 u16 fpdu_len)
1122{
1123 enum i40iw_status_code status = 0;
1124 u8 *crcptr;
1125 u32 mpacrc;
1126 u32 seqnum = buf->seqnum;
1127 struct list_head pbufl; /* partial buffer list */
1128 struct i40iw_puda_buf *txbuf = NULL;
1129 struct list_head *rxlist = &pfpdu->rxlist;
1130
1131 INIT_LIST_HEAD(&pbufl);
1132 list_add(&buf->list, &pbufl);
1133
1134 status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
 1135 if (status)
1136 goto error;
1137
1138 txbuf = i40iw_puda_get_bufpool(ieq);
1139 if (!txbuf) {
1140 pfpdu->no_tx_bufs++;
1141 status = I40IW_ERR_NO_TXBUFS;
1142 goto error;
1143 }
1144
1145 i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1146 i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1147 crcptr = txbuf->data + fpdu_len - 4;
1148 mpacrc = *(u32 *)crcptr;
1149 if (ieq->check_crc) {
1150 status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1151 (fpdu_len - 4), mpacrc);
1152 if (status) {
1153 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1154 "%s: error bad crc\n", __func__);
1155 goto error;
1156 }
1157 }
1158
1159 i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
1160 txbuf->mem.va, txbuf->totallen);
1161 i40iw_puda_send_buf(ieq, txbuf);
1162 pfpdu->rcv_nxt = seqnum + fpdu_len;
1163 return status;
1164 error:
1165 while (!list_empty(&pbufl)) {
1166 buf = (struct i40iw_puda_buf *)(pbufl.prev);
1167 list_del(&buf->list);
1168 list_add(&buf->list, rxlist);
1169 }
1170 if (txbuf)
1171 i40iw_puda_ret_bufpool(ieq, txbuf);
1172 return status;
1173}
1174
1175/**
1176 * i40iw_ieq_process_buf - process buffer rcvd for ieq
1177 * @ieq: ieq resource
1178 * @pfpdu: partial management per user qp
1179 * @buf: receive buffer
1180 */
1181static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
1182 struct i40iw_pfpdu *pfpdu,
1183 struct i40iw_puda_buf *buf)
1184{
1185 u16 fpdu_len = 0;
1186 u16 datalen = buf->datalen;
1187 u8 *datap = buf->data;
1188 u8 *crcptr;
1189 u16 ioffset = 0;
1190 u32 mpacrc;
1191 u32 seqnum = buf->seqnum;
1192 u16 length = 0;
1193 u16 full = 0;
1194 bool partial = false;
1195 struct i40iw_puda_buf *txbuf;
1196 struct list_head *rxlist = &pfpdu->rxlist;
1197 enum i40iw_status_code ret = 0;
1198 enum i40iw_status_code status = 0;
1199
1200 ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
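	/* walk the payload: each complete fpdu (sized by its leading MPA length field) is CRC-checked and accumulated; a trailing partial fpdu is handled separately */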
1201 while (datalen) {
1202 fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
1203 if (fpdu_len > pfpdu->max_fpdu_data) {
1204 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1205 "%s: error bad fpdu_len\n", __func__);
1206 status = I40IW_ERR_MPA_CRC;
1207 list_add(&buf->list, rxlist);
1208 return status;
1209 }
1210
1211 if (datalen < fpdu_len) {
1212 partial = true;
1213 break;
1214 }
1215 crcptr = datap + fpdu_len - 4;
1216 mpacrc = *(u32 *)crcptr;
1217 if (ieq->check_crc)
1218 ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
1219 datap, fpdu_len - 4, mpacrc);
1220 if (ret) {
1221 status = I40IW_ERR_MPA_CRC;
1222 list_add(&buf->list, rxlist);
1223 return status;
1224 }
1225 full++;
1226 pfpdu->fpdu_processed++;
1227 datap += fpdu_len;
1228 length += fpdu_len;
1229 datalen -= fpdu_len;
1230 }
1231 if (full) {
1232 /* copy full pdu's in the txbuf and send them out */
1233 txbuf = i40iw_puda_get_bufpool(ieq);
1234 if (!txbuf) {
1235 pfpdu->no_tx_bufs++;
1236 status = I40IW_ERR_NO_TXBUFS;
1237 list_add(&buf->list, rxlist);
1238 return status;
1239 }
1240 /* modify txbuf's buffer header */
1241 i40iw_ieq_setup_tx_buf(buf, txbuf);
1242 /* copy full fpdu's to new buffer */
1243 i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
1244 length);
1245 txbuf->totallen = buf->hdrlen + length;
1246
1247 i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
1248 i40iw_puda_send_buf(ieq, txbuf);
1249
1250 if (!datalen) {
1251 pfpdu->rcv_nxt = buf->seqnum + length;
1252 i40iw_puda_ret_bufpool(ieq, buf);
1253 return status;
1254 }
1255 buf->data = datap;
1256 buf->seqnum = seqnum + length;
1257 buf->datalen = datalen;
1258 pfpdu->rcv_nxt = buf->seqnum;
1259 }
1260 if (partial)
1261 status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1262
1263 return status;
1264}
1265
1266/**
 1267 * i40iw_ieq_process_fpdus - process fpdu buffers on its list
 1268 * @qp: qp for which partial fpdus are pending
1269 * @ieq: ieq resource
1270 */
1271static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
1272 struct i40iw_puda_rsrc *ieq)
1273{
1274 struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1275 struct list_head *rxlist = &pfpdu->rxlist;
1276 struct i40iw_puda_buf *buf;
1277 enum i40iw_status_code status;
1278
1279 do {
1280 if (list_empty(rxlist))
1281 break;
1282 buf = i40iw_puda_get_listbuf(rxlist);
1283 if (!buf) {
1284 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1285 "%s: error no buf\n", __func__);
1286 break;
1287 }
1288 if (buf->seqnum != pfpdu->rcv_nxt) {
1289 /* This could be out of order or missing packet */
1290 pfpdu->out_of_order++;
1291 list_add(&buf->list, rxlist);
1292 break;
1293 }
1294 /* keep processing buffers from the head of the list */
1295 status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
1296 if (status == I40IW_ERR_MPA_CRC) {
1297 pfpdu->mpa_crc_err = true;
1298 while (!list_empty(rxlist)) {
1299 buf = i40iw_puda_get_listbuf(rxlist);
1300 i40iw_puda_ret_bufpool(ieq, buf);
1301 pfpdu->crc_err++;
1302 }
1303 /* create CQP for AE */
1304 i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
1305 }
1306 } while (!status);
1307}
1308
1309/**
1310 * i40iw_ieq_handle_exception - handle qp's exception
1311 * @ieq: ieq resource
 1312 * @qp: qp receiving the exception
1313 * @buf: receive buffer
1314 */
1315static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
1316 struct i40iw_sc_qp *qp,
1317 struct i40iw_puda_buf *buf)
1318{
1319 struct i40iw_puda_buf *tmpbuf = NULL;
1320 struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1321 u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
1322 u32 rcv_wnd = hw_host_ctx[23];
1323 /* first partial seq # in q2 */
1324 u32 fps = qp->q2_buf[16];
1325 struct list_head *rxlist = &pfpdu->rxlist;
1326 struct list_head *plist;
1327
1328 pfpdu->total_ieq_bufs++;
1329
1330 if (pfpdu->mpa_crc_err) {
1331 pfpdu->crc_err++;
1332 goto error;
1333 }
1334 if (pfpdu->mode && (fps != pfpdu->fps)) {
1335 /* clean up qp as it is new partial sequence */
1336 i40iw_ieq_cleanup_qp(ieq->dev, qp);
1337 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1338 "%s: restarting new partial\n", __func__);
1339 pfpdu->mode = false;
1340 }
1341
1342 if (!pfpdu->mode) {
1343 i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
1344 /* First_Partial_Sequence_Number check */
1345 pfpdu->rcv_nxt = fps;
1346 pfpdu->fps = fps;
1347 pfpdu->mode = true;
1348 pfpdu->max_fpdu_data = ieq->mss;
1349 pfpdu->pmode_count++;
1350 INIT_LIST_HEAD(rxlist);
1351 i40iw_ieq_check_first_buf(buf, fps);
1352 }
1353
1354 if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1355 pfpdu->bad_seq_num++;
1356 goto error;
1357 }
1358
1359 if (!list_empty(rxlist)) {
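		/* keep rxlist ordered by sequence number: walk until a buffer with a later seqnum is found and insert before it */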
1360 tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
1361 plist = &tmpbuf->list;
1362 while ((struct list_head *)tmpbuf != rxlist) {
1363 if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
1364 break;
1365 tmpbuf = (struct i40iw_puda_buf *)plist->next;
1366 }
1367 /* Insert buf before tmpbuf */
1368 list_add_tail(&buf->list, &tmpbuf->list);
1369 } else {
1370 list_add_tail(&buf->list, rxlist);
1371 }
1372 i40iw_ieq_process_fpdus(qp, ieq);
1373 return;
1374 error:
1375 i40iw_puda_ret_bufpool(ieq, buf);
1376}
1377
1378/**
1379 * i40iw_ieq_receive - received exception buffer
1380 * @dev: iwarp device
1381 * @buf: exception buffer received
1382 */
1383static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
1384 struct i40iw_puda_buf *buf)
1385{
1386 struct i40iw_puda_rsrc *ieq = dev->ieq;
1387 struct i40iw_sc_qp *qp = NULL;
1388 u32 wqe_idx = ieq->compl_rxwqe_idx;
1389
1390 qp = i40iw_ieq_get_qp(dev, buf);
1391 if (!qp) {
1392 ieq->stats_bad_qp_id++;
1393 i40iw_puda_ret_bufpool(ieq, buf);
1394 } else {
1395 i40iw_ieq_handle_exception(ieq, qp, buf);
1396 }
1397 /*
1398 * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
1399 * on which wqe_idx to start replenish rq
1400 */
1401 if (!ieq->rxq_invalid_cnt)
1402 ieq->rx_wqe_idx = wqe_idx;
1403 ieq->rxq_invalid_cnt++;
1404}
1405
1406/**
1407 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
1408 * @dev: iwarp device
1409 * @sqwrid: pointer to puda buffer
1410 */
1411static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
1412{
1413 struct i40iw_puda_rsrc *ieq = dev->ieq;
1414 struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
1415
1416 i40iw_puda_ret_bufpool(ieq, buf);
1417 if (!list_empty(&ieq->txpend)) {
1418 buf = i40iw_puda_get_listbuf(&ieq->txpend);
1419 i40iw_puda_send_buf(ieq, buf);
1420 }
1421}
1422
1423/**
1424 * i40iw_ieq_cleanup_qp - qp is being destroyed
1425 * @dev: iwarp device
 1426 * @qp: qp whose pending fpdu buffers are cleaned up
1427 */
1428void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
1429{
1430 struct i40iw_puda_buf *buf;
1431 struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1432 struct list_head *rxlist = &pfpdu->rxlist;
1433 struct i40iw_puda_rsrc *ieq = dev->ieq;
1434
1435 if (!pfpdu->mode)
1436 return;
1437 while (!list_empty(rxlist)) {
1438 buf = i40iw_puda_get_listbuf(rxlist);
1439 i40iw_puda_ret_bufpool(ieq, buf);
1440 }
1441}