Sunil Goutham4863dea2015-05-26 19:20:15 -07001/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9#include <linux/pci.h>
10#include <linux/netdevice.h>
11#include <linux/ip.h>
12#include <linux/etherdevice.h>
Sunil Goutham83abb7d2017-03-07 18:09:08 +053013#include <linux/iommu.h>
Sunil Goutham4863dea2015-05-26 19:20:15 -070014#include <net/ip.h>
15#include <net/tso.h>
16
17#include "nic_reg.h"
18#include "nic.h"
19#include "q_struct.h"
20#include "nicvf_queues.h"
21
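/* Receive buffers are carved out of pages of this order. With 4KB base
 * pages a higher-order allocation (PAGE_ALLOC_COSTLY_ORDER, order 3 i.e.
 * 32KB on typical kernels) lets several RCV_FRAG_LEN buffers share one
 * compound page; with larger base pages (e.g. 64KB) an order-0 page is
 * already big enough.
 */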
Sunil Goutham83abb7d2017-03-07 18:09:08 +053022#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
23
24static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
25{
26 /* Translation is installed only when IOMMU is present */
27 if (nic->iommu_domain)
28 return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
29 return dma_addr;
30}
31
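/* Commit the page references taken for buffers carved out of the current
 * page: instead of bumping the refcount once per carved buffer, rb_pageref
 * counts the buffers and a single page_ref_add() is done here in one shot.
 */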
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053032static void nicvf_get_page(struct nicvf *nic)
33{
34 if (!nic->rb_pageref || !nic->rb_page)
35 return;
36
Joonsoo Kim6d061f92016-05-19 17:10:46 -070037 page_ref_add(nic->rb_page, nic->rb_pageref);
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053038 nic->rb_pageref = 0;
39}
40
Sunil Goutham4863dea2015-05-26 19:20:15 -070041/* Poll a register for a specific value */
42static int nicvf_poll_reg(struct nicvf *nic, int qidx,
43 u64 reg, int bit_pos, int bits, int val)
44{
45 u64 bit_mask;
46 u64 reg_val;
47 int timeout = 10;
48
49 bit_mask = (1ULL << bits) - 1;
50 bit_mask = (bit_mask << bit_pos);
51
52 while (timeout) {
53 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
54 if (((reg_val & bit_mask) >> bit_pos) == val)
55 return 0;
56 usleep_range(1000, 2000);
57 timeout--;
58 }
59 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
60 return 1;
61}
62
63/* Allocate memory for a queue's descriptors */
64static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
65 int q_len, int desc_size, int align_bytes)
66{
67 dmem->q_len = q_len;
68 dmem->size = (desc_size * q_len) + align_bytes;
69 /* Save address, need it while freeing */
70 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
71 &dmem->dma, GFP_KERNEL);
72 if (!dmem->unalign_base)
73 return -ENOMEM;
74
75 /* Align memory address for 'align_bytes' */
76 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
Aleksey Makarov39a0dd02015-06-02 11:00:25 -070077 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
Sunil Goutham4863dea2015-05-26 19:20:15 -070078 return 0;
79}
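/* Example of the alignment math above (illustrative values): if the coherent
 * allocation returns dma = 0x...1040 and align_bytes = 128,
 * NICVF_ALIGNED_ADDR() rounds the bus address up to phys_base = 0x...1080,
 * and 'base' is advanced by the same 0x40 so the CPU and HW views of the
 * descriptor ring stay in sync.
 */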
80
81/* Free queue's descriptor memory */
82static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
83{
84 if (!dmem)
85 return;
86
87 dma_free_coherent(&nic->pdev->dev, dmem->size,
88 dmem->unalign_base, dmem->dma);
89 dmem->unalign_base = NULL;
90 dmem->base = NULL;
91}
92
93/* Allocate buffer for packet reception
 94 * HW returns the memory address where the packet is DMA'ed, not a pointer
 95 * into the RBDR ring, so save the buffer address at the start of the
 96 * fragment and align the start address to a cache-aligned address
97 */
98static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
99 u32 buf_len, u64 **rbuf)
100{
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530101 int order = NICVF_PAGE_ORDER;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700102
 103	/* Check if request can be accommodated in the previously allocated page */
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530104 if (nic->rb_page &&
105 ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
106 nic->rb_pageref++;
107 goto ret;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700108 }
109
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530110 nicvf_get_page(nic);
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530111
Sunil Goutham4863dea2015-05-26 19:20:15 -0700112 /* Allocate a new page */
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530113 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
114 order);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700115 if (!nic->rb_page) {
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530116 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
117 return -ENOMEM;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700118 }
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530119 nic->rb_page_offset = 0;
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530120ret:
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530121 /* HW will ensure data coherency, CPU sync not required */
122 *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
123 nic->rb_page_offset, buf_len,
124 DMA_FROM_DEVICE,
125 DMA_ATTR_SKIP_CPU_SYNC));
126 if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
127 if (!nic->rb_page_offset)
128 __free_pages(nic->rb_page, order);
129 nic->rb_page = NULL;
130 return -ENOMEM;
131 }
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530132 nic->rb_page_offset += buf_len;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700133
Sunil Goutham4863dea2015-05-26 19:20:15 -0700134 return 0;
135}
136
Sunil Goutham668dda02015-12-07 10:30:33 +0530137/* Build skb around receive buffer */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700138static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
139 u64 rb_ptr, int len)
140{
Sunil Goutham668dda02015-12-07 10:30:33 +0530141 void *data;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700142 struct sk_buff *skb;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700143
Sunil Goutham668dda02015-12-07 10:30:33 +0530144 data = phys_to_virt(rb_ptr);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700145
146 /* Now build an skb to give to stack */
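	/* RCV_FRAG_LEN is assumed to include tailroom for the skb_shared_info
	 * that build_skb() places at the end of the buffer.
	 */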
Sunil Goutham668dda02015-12-07 10:30:33 +0530147 skb = build_skb(data, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700148 if (!skb) {
Sunil Goutham668dda02015-12-07 10:30:33 +0530149 put_page(virt_to_page(data));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700150 return NULL;
151 }
152
Sunil Goutham668dda02015-12-07 10:30:33 +0530153 prefetch(skb->data);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700154 return skb;
155}
156
157/* Allocate RBDR ring and populate receive buffers */
158static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
159 int ring_len, int buf_size)
160{
161 int idx;
162 u64 *rbuf;
163 struct rbdr_entry_t *desc;
164 int err;
165
166 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
167 sizeof(struct rbdr_entry_t),
168 NICVF_RCV_BUF_ALIGN_BYTES);
169 if (err)
170 return err;
171
172 rbdr->desc = rbdr->dmem.base;
173 /* Buffer size has to be in multiples of 128 bytes */
174 rbdr->dma_size = buf_size;
175 rbdr->enable = true;
176 rbdr->thresh = RBDR_THRESH;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530177 rbdr->head = 0;
178 rbdr->tail = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700179
180 nic->rb_page = NULL;
181 for (idx = 0; idx < ring_len; idx++) {
182 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
183 &rbuf);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530184 if (err) {
 185			/* Set tail so already allocated and mapped buffers get freed */
186 rbdr->tail = idx - 1;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700187 return err;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530188 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700189
190 desc = GET_RBDR_DESC(rbdr, idx);
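		/* The descriptor holds the buffer address right-shifted by
		 * NICVF_RCV_BUF_ALIGN (the low bits are zero for aligned
		 * buffers); it is shifted back when the buffer is released,
		 * see nicvf_free_rbdr().
		 */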
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530191 desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700192 }
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530193
194 nicvf_get_page(nic);
195
Sunil Goutham4863dea2015-05-26 19:20:15 -0700196 return 0;
197}
198
199/* Free RBDR ring and its receive buffers */
200static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
201{
202 int head, tail;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530203 u64 buf_addr, phys_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700204 struct rbdr_entry_t *desc;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700205
206 if (!rbdr)
207 return;
208
209 rbdr->enable = false;
210 if (!rbdr->dmem.base)
211 return;
212
213 head = rbdr->head;
214 tail = rbdr->tail;
215
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530216 /* Release page references */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700217 while (head != tail) {
218 desc = GET_RBDR_DESC(rbdr, head);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530219 buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
220 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
221 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
222 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
223 if (phys_addr)
224 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700225 head++;
226 head &= (rbdr->dmem.q_len - 1);
227 }
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530228 /* Release buffer of tail desc */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700229 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530230 buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
231 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
232 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
233 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
234 if (phys_addr)
235 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700236
237 /* Free RBDR ring */
238 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
239}
240
241/* Refill receive buffer descriptors with new buffers.
242 */
Aleksey Makarovfd7ec062015-06-02 11:00:23 -0700243static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700244{
245 struct queue_set *qs = nic->qs;
246 int rbdr_idx = qs->rbdr_cnt;
247 int tail, qcount;
248 int refill_rb_cnt;
249 struct rbdr *rbdr;
250 struct rbdr_entry_t *desc;
251 u64 *rbuf;
252 int new_rb = 0;
253
254refill:
255 if (!rbdr_idx)
256 return;
257 rbdr_idx--;
258 rbdr = &qs->rbdr[rbdr_idx];
259 /* Check if it's enabled */
260 if (!rbdr->enable)
261 goto next_rbdr;
262
263 /* Get no of desc's to be refilled */
264 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
265 qcount &= 0x7FFFF;
 266	/* Doorbell can be rung with a max of ring size minus 1 */
267 if (qcount >= (qs->rbdr_len - 1))
268 goto next_rbdr;
269 else
270 refill_rb_cnt = qs->rbdr_len - qcount - 1;
271
272 /* Start filling descs from tail */
273 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
274 while (refill_rb_cnt) {
275 tail++;
276 tail &= (rbdr->dmem.q_len - 1);
277
278 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
279 break;
280
281 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530282 desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700283 refill_rb_cnt--;
284 new_rb++;
285 }
286
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530287 nicvf_get_page(nic);
288
Sunil Goutham4863dea2015-05-26 19:20:15 -0700289 /* make sure all memory stores are done before ringing doorbell */
290 smp_wmb();
291
292 /* Check if buffer allocation failed */
293 if (refill_rb_cnt)
294 nic->rb_alloc_fail = true;
295 else
296 nic->rb_alloc_fail = false;
297
298 /* Notify HW */
299 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
300 rbdr_idx, new_rb);
301next_rbdr:
 302	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530303 if (!nic->rb_alloc_fail && rbdr->enable &&
304 netif_running(nic->pnicvf->netdev))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700305 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
306
307 if (rbdr_idx)
308 goto refill;
309}
310
311/* Alloc rcv buffers in non-atomic mode for better success */
312void nicvf_rbdr_work(struct work_struct *work)
313{
314 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
315
316 nicvf_refill_rbdr(nic, GFP_KERNEL);
317 if (nic->rb_alloc_fail)
318 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
319 else
320 nic->rb_work_scheduled = false;
321}
322
323/* In Softirq context, alloc rcv buffers in atomic mode */
324void nicvf_rbdr_task(unsigned long data)
325{
326 struct nicvf *nic = (struct nicvf *)data;
327
328 nicvf_refill_rbdr(nic, GFP_ATOMIC);
329 if (nic->rb_alloc_fail) {
330 nic->rb_work_scheduled = true;
331 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
332 }
333}
334
335/* Initialize completion queue */
336static int nicvf_init_cmp_queue(struct nicvf *nic,
337 struct cmp_queue *cq, int q_len)
338{
339 int err;
340
341 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
342 NICVF_CQ_BASE_ALIGN_BYTES);
343 if (err)
344 return err;
345
346 cq->desc = cq->dmem.base;
Sunil Gouthamb9687b42015-12-10 13:25:20 +0530347 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700348 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
349
350 return 0;
351}
352
353static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
354{
355 if (!cq)
356 return;
357 if (!cq->dmem.base)
358 return;
359
360 nicvf_free_q_desc_mem(nic, &cq->dmem);
361}
362
363/* Initialize transmit queue */
364static int nicvf_init_snd_queue(struct nicvf *nic,
365 struct snd_queue *sq, int q_len)
366{
367 int err;
368
369 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
370 NICVF_SQ_BASE_ALIGN_BYTES);
371 if (err)
372 return err;
373
374 sq->desc = sq->dmem.base;
Aleksey Makarov86ace692015-06-02 11:00:27 -0700375 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
Aleksey Makarovfa1a6c92015-06-02 11:00:26 -0700376 if (!sq->skbuff)
377 return -ENOMEM;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700378 sq->head = 0;
379 sq->tail = 0;
380 atomic_set(&sq->free_cnt, q_len - 1);
381 sq->thresh = SND_QUEUE_THRESH;
382
383 /* Preallocate memory for TSO segment's header */
384 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
385 q_len * TSO_HEADER_SIZE,
386 &sq->tso_hdrs_phys, GFP_KERNEL);
387 if (!sq->tso_hdrs)
388 return -ENOMEM;
389
390 return 0;
391}
392
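/* Walk the gather subdescriptors that follow the header subdescriptor at
 * 'hdr_sqe' and unmap each DMA mapping created in nicvf_sq_append_skb().
 */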
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530393void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
394 int hdr_sqe, u8 subdesc_cnt)
395{
396 u8 idx;
397 struct sq_gather_subdesc *gather;
398
399 /* Unmap DMA mapped skb data buffers */
400 for (idx = 0; idx < subdesc_cnt; idx++) {
401 hdr_sqe++;
402 hdr_sqe &= (sq->dmem.q_len - 1);
403 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
404 /* HW will ensure data coherency, CPU sync not required */
405 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
406 gather->size, DMA_TO_DEVICE,
407 DMA_ATTR_SKIP_CPU_SYNC);
408 }
409}
410
Sunil Goutham4863dea2015-05-26 19:20:15 -0700411static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
412{
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530413 struct sk_buff *skb;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530414 struct sq_hdr_subdesc *hdr;
415 struct sq_hdr_subdesc *tso_sqe;
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530416
Sunil Goutham4863dea2015-05-26 19:20:15 -0700417 if (!sq)
418 return;
419 if (!sq->dmem.base)
420 return;
421
422 if (sq->tso_hdrs)
Sunil Goutham143ceb02015-07-29 16:49:37 +0300423 dma_free_coherent(&nic->pdev->dev,
424 sq->dmem.q_len * TSO_HEADER_SIZE,
Sunil Goutham4863dea2015-05-26 19:20:15 -0700425 sq->tso_hdrs, sq->tso_hdrs_phys);
426
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530427 /* Free pending skbs in the queue */
428 smp_rmb();
429 while (sq->head != sq->tail) {
430 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530431 if (!skb)
432 goto next;
433 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
434 /* Check for dummy descriptor used for HW TSO offload on 88xx */
435 if (hdr->dont_send) {
436 /* Get actual TSO descriptors and unmap them */
437 tso_sqe =
438 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
439 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
440 tso_sqe->subdesc_cnt);
441 } else {
442 nicvf_unmap_sndq_buffers(nic, sq, sq->head,
443 hdr->subdesc_cnt);
444 }
445 dev_kfree_skb_any(skb);
446next:
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530447 sq->head++;
448 sq->head &= (sq->dmem.q_len - 1);
449 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700450 kfree(sq->skbuff);
451 nicvf_free_q_desc_mem(nic, &sq->dmem);
452}
453
454static void nicvf_reclaim_snd_queue(struct nicvf *nic,
455 struct queue_set *qs, int qidx)
456{
457 /* Disable send queue */
458 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
459 /* Check if SQ is stopped */
460 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
461 return;
462 /* Reset send queue */
463 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
464}
465
466static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
467 struct queue_set *qs, int qidx)
468{
469 union nic_mbx mbx = {};
470
471 /* Make sure all packets in the pipeline are written back into mem */
472 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
473 nicvf_send_msg_to_pf(nic, &mbx);
474}
475
476static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
477 struct queue_set *qs, int qidx)
478{
 479	/* Disable timer threshold (doesn't get reset upon CQ reset) */
480 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
481 /* Disable completion queue */
482 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
483 /* Reset completion queue */
484 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
485}
486
487static void nicvf_reclaim_rbdr(struct nicvf *nic,
488 struct rbdr *rbdr, int qidx)
489{
490 u64 tmp, fifo_state;
491 int timeout = 10;
492
 493	/* Save head and tail pointers for freeing up buffers */
494 rbdr->head = nicvf_queue_reg_read(nic,
495 NIC_QSET_RBDR_0_1_HEAD,
496 qidx) >> 3;
497 rbdr->tail = nicvf_queue_reg_read(nic,
498 NIC_QSET_RBDR_0_1_TAIL,
499 qidx) >> 3;
500
501 /* If RBDR FIFO is in 'FAIL' state then do a reset first
 502	 * before reclaiming.
503 */
504 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
505 if (((fifo_state >> 62) & 0x03) == 0x3)
506 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
507 qidx, NICVF_RBDR_RESET);
508
509 /* Disable RBDR */
510 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
511 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
512 return;
513 while (1) {
514 tmp = nicvf_queue_reg_read(nic,
515 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
516 qidx);
517 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
518 break;
519 usleep_range(1000, 2000);
520 timeout--;
521 if (!timeout) {
522 netdev_err(nic->netdev,
523 "Failed polling on prefetch status\n");
524 return;
525 }
526 }
527 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
528 qidx, NICVF_RBDR_RESET);
529
530 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
531 return;
532 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
533 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
534 return;
535}
536
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300537void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
538{
539 u64 rq_cfg;
540 int sqs;
541
542 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
543
 544	/* Enable stripping of the first VLAN tag */
545 if (features & NETIF_F_HW_VLAN_CTAG_RX)
546 rq_cfg |= (1ULL << 25);
547 else
548 rq_cfg &= ~(1ULL << 25);
549 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
550
551 /* Configure Secondary Qsets, if any */
552 for (sqs = 0; sqs < nic->sqs_count; sqs++)
553 if (nic->snicvf[sqs])
554 nicvf_queue_reg_write(nic->snicvf[sqs],
555 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
556}
557
Jerin Jacob3458c402016-08-12 16:51:39 +0530558static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
559{
560 union nic_mbx mbx = {};
561
Sunil Goutham964cb692016-11-15 17:38:16 +0530562 /* Reset all RQ/SQ and VF stats */
Jerin Jacob3458c402016-08-12 16:51:39 +0530563 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
Sunil Goutham964cb692016-11-15 17:38:16 +0530564 mbx.reset_stat.rx_stat_mask = 0x3FFF;
565 mbx.reset_stat.tx_stat_mask = 0x1F;
Jerin Jacob3458c402016-08-12 16:51:39 +0530566 mbx.reset_stat.rq_stat_mask = 0xFFFF;
Sunil Goutham964cb692016-11-15 17:38:16 +0530567 mbx.reset_stat.sq_stat_mask = 0xFFFF;
Jerin Jacob3458c402016-08-12 16:51:39 +0530568 nicvf_send_msg_to_pf(nic, &mbx);
569}
570
Sunil Goutham4863dea2015-05-26 19:20:15 -0700571/* Configures receive queue */
572static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
573 int qidx, bool enable)
574{
575 union nic_mbx mbx = {};
576 struct rcv_queue *rq;
577 struct rq_cfg rq_cfg;
578
579 rq = &qs->rq[qidx];
580 rq->enable = enable;
581
582 /* Disable receive queue */
583 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
584
585 if (!rq->enable) {
586 nicvf_reclaim_rcv_queue(nic, qs, qidx);
587 return;
588 }
589
590 rq->cq_qs = qs->vnic_id;
591 rq->cq_idx = qidx;
592 rq->start_rbdr_qs = qs->vnic_id;
593 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
594 rq->cont_rbdr_qs = qs->vnic_id;
595 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
 596	/* all writes of RBDR data to be loaded into L2 Cache as well */
597 rq->caching = 1;
598
599 /* Send a mailbox msg to PF to config RQ */
600 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
601 mbx.rq.qs_num = qs->vnic_id;
602 mbx.rq.rq_num = qidx;
603 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
604 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
605 (rq->cont_qs_rbdr_idx << 8) |
606 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
607 nicvf_send_msg_to_pf(nic, &mbx);
608
609 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530610 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
611 (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
612 (qs->vnic_id << 0);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700613 nicvf_send_msg_to_pf(nic, &mbx);
614
615 /* RQ drop config
616 * Enable CQ drop to reserve sufficient CQEs for all tx packets
617 */
618 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530619 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
620 (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
621 (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700622 nicvf_send_msg_to_pf(nic, &mbx);
623
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530624 if (!nic->sqs_mode && (qidx == 0)) {
625 /* Enable checking L3/L4 length and TCP/UDP checksums */
626 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
627 (BIT(24) | BIT(23) | BIT(21)));
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300628 nicvf_config_vlan_stripping(nic, nic->netdev->features);
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530629 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700630
631 /* Enable Receive queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200632 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700633 rq_cfg.ena = 1;
634 rq_cfg.tcp_ena = 0;
635 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
636}
637
638/* Configures completion queue */
639void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
640 int qidx, bool enable)
641{
642 struct cmp_queue *cq;
643 struct cq_cfg cq_cfg;
644
645 cq = &qs->cq[qidx];
646 cq->enable = enable;
647
648 if (!cq->enable) {
649 nicvf_reclaim_cmp_queue(nic, qs, qidx);
650 return;
651 }
652
653 /* Reset completion queue */
654 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
655
656 if (!cq->enable)
657 return;
658
659 spin_lock_init(&cq->lock);
660 /* Set completion queue base address */
661 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
662 qidx, (u64)(cq->dmem.phys_base));
663
664 /* Enable Completion queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200665 memset(&cq_cfg, 0, sizeof(struct cq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700666 cq_cfg.ena = 1;
667 cq_cfg.reset = 0;
668 cq_cfg.caching = 0;
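	/* qsize encodes the ring size as log2(entries / 1024),
	 * e.g. a 64K-entry CQ would be programmed as 6.
	 */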
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530669 cq_cfg.qsize = ilog2(qs->cq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700670 cq_cfg.avg_con = 0;
671 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
672
673 /* Set threshold value for interrupt generation */
674 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
675 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
Sunil Goutham006394a2015-12-02 15:36:15 +0530676 qidx, CMP_QUEUE_TIMER_THRESH);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700677}
678
679/* Configures transmit queue */
680static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
681 int qidx, bool enable)
682{
683 union nic_mbx mbx = {};
684 struct snd_queue *sq;
685 struct sq_cfg sq_cfg;
686
687 sq = &qs->sq[qidx];
688 sq->enable = enable;
689
690 if (!sq->enable) {
691 nicvf_reclaim_snd_queue(nic, qs, qidx);
692 return;
693 }
694
695 /* Reset send queue */
696 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
697
698 sq->cq_qs = qs->vnic_id;
699 sq->cq_idx = qidx;
700
701 /* Send a mailbox msg to PF to config SQ */
702 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
703 mbx.sq.qs_num = qs->vnic_id;
704 mbx.sq.sq_num = qidx;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300705 mbx.sq.sqs_mode = nic->sqs_mode;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700706 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
707 nicvf_send_msg_to_pf(nic, &mbx);
708
709 /* Set queue base address */
710 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
711 qidx, (u64)(sq->dmem.phys_base));
712
713 /* Enable send queue & set queue size */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200714 memset(&sq_cfg, 0, sizeof(struct sq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700715 sq_cfg.ena = 1;
716 sq_cfg.reset = 0;
717 sq_cfg.ldwb = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530718 sq_cfg.qsize = ilog2(qs->sq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700719 sq_cfg.tstmp_bgx_intf = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530720 /* CQ's level at which HW will stop processing SQEs to avoid
721 * transmitting a pkt with no space in CQ to post CQE_TX.
722 */
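	/* cq_limit appears to be programmed in 1/256ths of the CQ size, hence
	 * the scaling below; CMP_QUEUE_PIPELINE_RSVD CQE slots are kept in
	 * reserve for packets already in the transmit pipeline.
	 */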
723 sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700724 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
725
726 /* Set threshold value for interrupt generation */
727 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
728
729 /* Set queue:cpu affinity for better load distribution */
730 if (cpu_online(qidx)) {
731 cpumask_set_cpu(qidx, &sq->affinity_mask);
732 netif_set_xps_queue(nic->netdev,
733 &sq->affinity_mask, qidx);
734 }
735}
736
737/* Configures receive buffer descriptor ring */
738static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
739 int qidx, bool enable)
740{
741 struct rbdr *rbdr;
742 struct rbdr_cfg rbdr_cfg;
743
744 rbdr = &qs->rbdr[qidx];
745 nicvf_reclaim_rbdr(nic, rbdr, qidx);
746 if (!enable)
747 return;
748
749 /* Set descriptor base address */
750 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
751 qidx, (u64)(rbdr->dmem.phys_base));
752
753 /* Enable RBDR & set queue size */
754 /* Buffer size should be in multiples of 128 bytes */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200755 memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700756 rbdr_cfg.ena = 1;
757 rbdr_cfg.reset = 0;
758 rbdr_cfg.ldwb = 0;
759 rbdr_cfg.qsize = RBDR_SIZE;
760 rbdr_cfg.avg_con = 0;
761 rbdr_cfg.lines = rbdr->dma_size / 128;
762 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
763 qidx, *(u64 *)&rbdr_cfg);
764
765 /* Notify HW */
766 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
767 qidx, qs->rbdr_len - 1);
768
769 /* Set threshold value for interrupt generation */
770 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
771 qidx, rbdr->thresh - 1);
772}
773
774/* Requests PF to assign and enable Qset */
775void nicvf_qset_config(struct nicvf *nic, bool enable)
776{
777 union nic_mbx mbx = {};
778 struct queue_set *qs = nic->qs;
779 struct qs_cfg *qs_cfg;
780
781 if (!qs) {
782 netdev_warn(nic->netdev,
783 "Qset is still not allocated, don't init queues\n");
784 return;
785 }
786
787 qs->enable = enable;
788 qs->vnic_id = nic->vf_id;
789
790 /* Send a mailbox msg to PF to config Qset */
791 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
792 mbx.qs.num = qs->vnic_id;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300793 mbx.qs.sqs_count = nic->sqs_count;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700794
795 mbx.qs.cfg = 0;
796 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
797 if (qs->enable) {
798 qs_cfg->ena = 1;
799#ifdef __BIG_ENDIAN
800 qs_cfg->be = 1;
801#endif
802 qs_cfg->vnic = qs->vnic_id;
803 }
804 nicvf_send_msg_to_pf(nic, &mbx);
805}
806
807static void nicvf_free_resources(struct nicvf *nic)
808{
809 int qidx;
810 struct queue_set *qs = nic->qs;
811
812 /* Free receive buffer descriptor ring */
813 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
814 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
815
816 /* Free completion queue */
817 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
818 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
819
820 /* Free send queue */
821 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
822 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
823}
824
825static int nicvf_alloc_resources(struct nicvf *nic)
826{
827 int qidx;
828 struct queue_set *qs = nic->qs;
829
830 /* Alloc receive buffer descriptor ring */
831 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
832 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
833 DMA_BUFFER_LEN))
834 goto alloc_fail;
835 }
836
837 /* Alloc send queue */
838 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
839 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
840 goto alloc_fail;
841 }
842
843 /* Alloc completion queue */
844 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
845 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
846 goto alloc_fail;
847 }
848
849 return 0;
850alloc_fail:
851 nicvf_free_resources(nic);
852 return -ENOMEM;
853}
854
855int nicvf_set_qset_resources(struct nicvf *nic)
856{
857 struct queue_set *qs;
858
859 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
860 if (!qs)
861 return -ENOMEM;
862 nic->qs = qs;
863
864 /* Set count of each queue */
Sunil Goutham3a397eb2016-08-12 16:51:27 +0530865 qs->rbdr_cnt = DEFAULT_RBDR_CNT;
866 qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
867 qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
868 qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700869
870 /* Set queue lengths */
871 qs->rbdr_len = RCV_BUF_COUNT;
872 qs->sq_len = SND_QUEUE_LEN;
873 qs->cq_len = CMP_QUEUE_LEN;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300874
875 nic->rx_queues = qs->rq_cnt;
876 nic->tx_queues = qs->sq_cnt;
877
Sunil Goutham4863dea2015-05-26 19:20:15 -0700878 return 0;
879}
880
881int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
882{
883 bool disable = false;
884 struct queue_set *qs = nic->qs;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530885 struct queue_set *pqs = nic->pnicvf->qs;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700886 int qidx;
887
888 if (!qs)
889 return 0;
890
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530891 /* Take primary VF's queue lengths.
892 * This is needed to take queue lengths set from ethtool
893 * into consideration.
894 */
895 if (nic->sqs_mode && pqs) {
896 qs->cq_len = pqs->cq_len;
897 qs->sq_len = pqs->sq_len;
898 }
899
Sunil Goutham4863dea2015-05-26 19:20:15 -0700900 if (enable) {
901 if (nicvf_alloc_resources(nic))
902 return -ENOMEM;
903
904 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
905 nicvf_snd_queue_config(nic, qs, qidx, enable);
906 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
907 nicvf_cmp_queue_config(nic, qs, qidx, enable);
908 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
909 nicvf_rbdr_config(nic, qs, qidx, enable);
910 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
911 nicvf_rcv_queue_config(nic, qs, qidx, enable);
912 } else {
913 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
914 nicvf_rcv_queue_config(nic, qs, qidx, disable);
915 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
916 nicvf_rbdr_config(nic, qs, qidx, disable);
917 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
918 nicvf_snd_queue_config(nic, qs, qidx, disable);
919 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
920 nicvf_cmp_queue_config(nic, qs, qidx, disable);
921
922 nicvf_free_resources(nic);
923 }
924
Jerin Jacob3458c402016-08-12 16:51:39 +0530925 /* Reset RXQ's stats.
926 * SQ's stats will get reset automatically once SQ is reset.
927 */
928 nicvf_reset_rcv_queue_stats(nic);
929
Sunil Goutham4863dea2015-05-26 19:20:15 -0700930 return 0;
931}
932
933/* Reserve free descriptors from SQ
934 * returns the descriptor number of the first reserved descriptor
935 */
936static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
937{
938 int qentry;
939
940 qentry = sq->tail;
941 atomic_sub(desc_cnt, &sq->free_cnt);
942 sq->tail += desc_cnt;
943 sq->tail &= (sq->dmem.q_len - 1);
944
945 return qentry;
946}
947
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530948/* Roll back to the previous tail pointer when descriptors are not used */
949static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
950 int qentry, int desc_cnt)
951{
952 sq->tail = qentry;
953 atomic_add(desc_cnt, &sq->free_cnt);
954}
955
Sunil Goutham4863dea2015-05-26 19:20:15 -0700956/* Free descriptor back to SQ for future use */
957void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
958{
959 atomic_add(desc_cnt, &sq->free_cnt);
960 sq->head += desc_cnt;
961 sq->head &= (sq->dmem.q_len - 1);
962}
963
964static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
965{
966 qentry++;
967 qentry &= (sq->dmem.q_len - 1);
968 return qentry;
969}
970
971void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
972{
973 u64 sq_cfg;
974
975 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
976 sq_cfg |= NICVF_SQ_EN;
977 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
978 /* Ring doorbell so that H/W restarts processing SQEs */
979 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
980}
981
982void nicvf_sq_disable(struct nicvf *nic, int qidx)
983{
984 u64 sq_cfg;
985
986 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
987 sq_cfg &= ~NICVF_SQ_EN;
988 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
989}
990
991void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
992 int qidx)
993{
994 u64 head, tail;
995 struct sk_buff *skb;
996 struct nicvf *nic = netdev_priv(netdev);
997 struct sq_hdr_subdesc *hdr;
998
999 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1000 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1001 while (sq->head != head) {
1002 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1003 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1004 nicvf_put_sq_desc(sq, 1);
1005 continue;
1006 }
1007 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham143ceb02015-07-29 16:49:37 +03001008 if (skb)
1009 dev_kfree_skb_any(skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001010 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
1011 atomic64_add(hdr->tot_len,
1012 (atomic64_t *)&netdev->stats.tx_bytes);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001013 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1014 }
1015}
1016
1017/* Calculate no of SQ subdescriptors needed to transmit all
1018 * segments of this TSO packet.
1019 * Taken from 'Tilera network driver' with a minor modification.
1020 */
1021static int nicvf_tso_count_subdescs(struct sk_buff *skb)
1022{
1023 struct skb_shared_info *sh = skb_shinfo(skb);
1024 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1025 unsigned int data_len = skb->len - sh_len;
1026 unsigned int p_len = sh->gso_size;
1027 long f_id = -1; /* id of the current fragment */
1028 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1029 long f_used = 0; /* bytes used from the current fragment */
1030 long n; /* size of the current piece of payload */
1031 int num_edescs = 0;
1032 int segment;
1033
1034 for (segment = 0; segment < sh->gso_segs; segment++) {
1035 unsigned int p_used = 0;
1036
1037 /* One edesc for header and for each piece of the payload. */
1038 for (num_edescs++; p_used < p_len; num_edescs++) {
1039 /* Advance as needed. */
1040 while (f_used >= f_size) {
1041 f_id++;
1042 f_size = skb_frag_size(&sh->frags[f_id]);
1043 f_used = 0;
1044 }
1045
1046 /* Use bytes from the current fragment. */
1047 n = p_len - p_used;
1048 if (n > f_size - f_used)
1049 n = f_size - f_used;
1050 f_used += n;
1051 p_used += n;
1052 }
1053
1054 /* The last segment may be less than gso_size. */
1055 data_len -= p_len;
1056 if (data_len < p_len)
1057 p_len = data_len;
1058 }
1059
 1060	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
1061 return num_edescs + sh->gso_segs;
1062}
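/* Worked example (illustrative): a linear skb with 2800 bytes of TSO payload
 * and gso_size 1400 gives gso_segs = 2; the loop above counts 2 edescs per
 * segment (one for the header, one contiguous payload piece), so the function
 * returns 4 + 2 = 6 subdescriptors, matching the HDR + 2 x GATHER layout
 * built per segment in nicvf_sq_append_tso().
 */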
1063
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301064#define POST_CQE_DESC_COUNT 2
1065
Sunil Goutham4863dea2015-05-26 19:20:15 -07001066/* Get the number of SQ descriptors needed to xmit this skb */
1067static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
1068{
1069 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1070
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301071 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001072 subdesc_cnt = nicvf_tso_count_subdescs(skb);
1073 return subdesc_cnt;
1074 }
1075
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301076 /* Dummy descriptors to get TSO pkt completion notification */
1077 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
1078 subdesc_cnt += POST_CQE_DESC_COUNT;
1079
Sunil Goutham4863dea2015-05-26 19:20:15 -07001080 if (skb_shinfo(skb)->nr_frags)
1081 subdesc_cnt += skb_shinfo(skb)->nr_frags;
1082
1083 return subdesc_cnt;
1084}
1085
1086/* Add SQ HEADER subdescriptor.
1087 * First subdescriptor for every send descriptor.
1088 */
1089static inline void
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301090nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001091 int subdesc_cnt, struct sk_buff *skb, int len)
1092{
1093 int proto;
1094 struct sq_hdr_subdesc *hdr;
1095
1096 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001097 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1098 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301099
1100 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1101 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
1102 * segment transmitted on 88xx.
1103 */
1104 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
1105 } else {
1106 sq->skbuff[qentry] = (u64)skb;
1107 /* Enable notification via CQE after processing SQE */
1108 hdr->post_cqe = 1;
1109 /* No of subdescriptors following this */
1110 hdr->subdesc_cnt = subdesc_cnt;
1111 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001112 hdr->tot_len = len;
1113
1114 /* Offload checksum calculation to HW */
1115 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001116 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1117 hdr->l3_offset = skb_network_offset(skb);
1118 hdr->l4_offset = skb_transport_offset(skb);
1119
1120 proto = ip_hdr(skb)->protocol;
1121 switch (proto) {
1122 case IPPROTO_TCP:
1123 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1124 break;
1125 case IPPROTO_UDP:
1126 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1127 break;
1128 case IPPROTO_SCTP:
1129 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1130 break;
1131 }
1132 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301133
1134 if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1135 hdr->tso = 1;
1136 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
1137 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1138 /* For non-tunneled pkts, point this to L2 ethertype */
1139 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
Sunil Goutham964cb692016-11-15 17:38:16 +05301140 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301141 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001142}
1143
1144/* SQ GATHER subdescriptor
1145 * Must follow HDR descriptor
1146 */
1147static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1148 int size, u64 data)
1149{
1150 struct sq_gather_subdesc *gather;
1151
1152 qentry &= (sq->dmem.q_len - 1);
1153 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1154
1155 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1156 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
Sunil Goutham4b561c12015-07-29 16:49:36 +03001157 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001158 gather->size = size;
1159 gather->addr = data;
1160}
1161
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301162/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 1163 * packet so that a CQE is posted as a notification for transmission of
 1164 * the TSO packet.
1165 */
1166static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1167 int tso_sqe, struct sk_buff *skb)
1168{
1169 struct sq_imm_subdesc *imm;
1170 struct sq_hdr_subdesc *hdr;
1171
1172 sq->skbuff[qentry] = (u64)skb;
1173
1174 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1175 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1176 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1177 /* Enable notification via CQE after processing SQE */
1178 hdr->post_cqe = 1;
1179 /* There is no packet to transmit here */
1180 hdr->dont_send = 1;
1181 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1182 hdr->tot_len = 1;
1183 /* Actual TSO header SQE index, needed for cleanup */
1184 hdr->rsvd2 = tso_sqe;
1185
1186 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1187 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1188 memset(imm, 0, SND_QUEUE_DESC_SIZE);
1189 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1190 imm->len = 1;
1191}
1192
Sunil Goutham2c204c22016-09-23 14:42:28 +05301193static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1194 int sq_num, int desc_cnt)
1195{
1196 struct netdev_queue *txq;
1197
1198 txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1199 skb_get_queue_mapping(skb));
1200
1201 netdev_tx_sent_queue(txq, skb->len);
1202
1203 /* make sure all memory stores are done before ringing doorbell */
1204 smp_wmb();
1205
1206 /* Inform HW to xmit all TSO segments */
1207 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1208 sq_num, desc_cnt);
1209}
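/* Note: netdev_tx_sent_queue() above feeds byte-queue limits; it is assumed
 * to be paired with netdev_tx_completed_queue() in the CQE handling path.
 */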
1210
Sunil Goutham4863dea2015-05-26 19:20:15 -07001211/* Segment a TSO packet into 'gso_size' segments and append
1212 * them to SQ for transfer
1213 */
1214static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
Sunil Goutham92dc8762015-08-30 12:29:15 +03001215 int sq_num, int qentry, struct sk_buff *skb)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001216{
1217 struct tso_t tso;
1218 int seg_subdescs = 0, desc_cnt = 0;
1219 int seg_len, total_len, data_left;
1220 int hdr_qentry = qentry;
1221 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1222
1223 tso_start(skb, &tso);
1224 total_len = skb->len - hdr_len;
1225 while (total_len > 0) {
1226 char *hdr;
1227
1228 /* Save Qentry for adding HDR_SUBDESC at the end */
1229 hdr_qentry = qentry;
1230
1231 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1232 total_len -= data_left;
1233
1234 /* Add segment's header */
1235 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1236 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1237 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1238 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1239 sq->tso_hdrs_phys +
1240 qentry * TSO_HEADER_SIZE);
 1241		/* HDR_SUBDESC + GATHER */
1242 seg_subdescs = 2;
1243 seg_len = hdr_len;
1244
1245 /* Add segment's payload fragments */
1246 while (data_left > 0) {
1247 int size;
1248
1249 size = min_t(int, tso.size, data_left);
1250
1251 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1252 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1253 virt_to_phys(tso.data));
1254 seg_subdescs++;
1255 seg_len += size;
1256
1257 data_left -= size;
1258 tso_build_data(skb, &tso, size);
1259 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301260 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001261 seg_subdescs - 1, skb, seg_len);
Sunil Goutham143ceb02015-07-29 16:49:37 +03001262 sq->skbuff[hdr_qentry] = (u64)NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001263 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1264
1265 desc_cnt += seg_subdescs;
1266 }
1267 /* Save SKB in the last segment for freeing */
1268 sq->skbuff[hdr_qentry] = (u64)skb;
1269
Sunil Goutham2c204c22016-09-23 14:42:28 +05301270 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001271
Sunil Goutham964cb692016-11-15 17:38:16 +05301272 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001273 return 1;
1274}
1275
1276/* Append an skb to a SQ for packet transfer. */
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301277int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1278 struct sk_buff *skb, u8 sq_num)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001279{
1280 int i, size;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301281 int subdesc_cnt, hdr_sqe = 0;
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301282 int qentry;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301283 u64 dma_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001284
1285 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1286 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1287 goto append_fail;
1288
1289 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1290
 1291	/* Check if it's a TSO packet */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301292 if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
Sunil Goutham92dc8762015-08-30 12:29:15 +03001293 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001294
1295 /* Add SQ header subdesc */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301296 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1297 skb, skb->len);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301298 hdr_sqe = qentry;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001299
1300 /* Add SQ gather subdescs */
1301 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1302 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301303 /* HW will ensure data coherency, CPU sync not required */
1304 dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1305 offset_in_page(skb->data), size,
1306 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1307 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1308 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1309 return 0;
1310 }
1311
1312 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001313
1314 /* Check for scattered buffer */
1315 if (!skb_is_nonlinear(skb))
1316 goto doorbell;
1317
1318 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1319 const struct skb_frag_struct *frag;
1320
1321 frag = &skb_shinfo(skb)->frags[i];
1322
1323 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1324 size = skb_frag_size(frag);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301325 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1326 skb_frag_page(frag),
1327 frag->page_offset, size,
1328 DMA_TO_DEVICE,
1329 DMA_ATTR_SKIP_CPU_SYNC);
1330 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1331 /* Free entire chain of mapped buffers
 1332			 * here 'i' counts the frags mapped so far plus the skb->data mapping above
1333 */
1334 nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1335 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1336 return 0;
1337 }
1338 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001339 }
1340
1341doorbell:
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301342 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1343 qentry = nicvf_get_nxt_sqentry(sq, qentry);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301344 nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301345 }
1346
Sunil Goutham2c204c22016-09-23 14:42:28 +05301347 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001348
Sunil Goutham4863dea2015-05-26 19:20:15 -07001349 return 1;
1350
1351append_fail:
Sunil Goutham92dc8762015-08-30 12:29:15 +03001352 /* Use original PCI dev for debug log */
1353 nic = nic->pnicvf;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001354 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1355 return 0;
1356}
1357
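/* rb_lens are 16-bit fields packed four to a 64-bit word in the CQE; on
 * big-endian the order within each word is reversed, so remap the index
 * (0,1,2,3 -> 3,2,1,0 and 4,5,6,7 -> 7,6,5,4).
 */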
1358static inline unsigned frag_num(unsigned i)
1359{
1360#ifdef __BIG_ENDIAN
1361 return (i & ~3) + 3 - (i & 3);
1362#else
1363 return i;
1364#endif
1365}
1366
1367/* Returns SKB for a received packet */
1368struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1369{
1370 int frag;
1371 int payload_len = 0;
1372 struct sk_buff *skb = NULL;
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301373 struct page *page;
1374 int offset;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001375 u16 *rb_lens = NULL;
1376 u64 *rb_ptrs = NULL;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301377 u64 phys_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001378
1379 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
Sunil Goutham02a72bd2016-08-12 16:51:28 +05301380	/* On all chips except 88xx pass1, CQE_RX2_S is added to
1381 * CQE_RX at word6, hence buffer pointers move by word
1382 *
1383 * Use existing 'hw_tso' flag which will be set for all chips
 1384	 * except 88xx pass1, instead of an additional cache line
1385 * access (or miss) by using pci dev's revision.
1386 */
1387 if (!nic->hw_tso)
1388 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1389 else
1390 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
Sunil Goutham4863dea2015-05-26 19:20:15 -07001391
Sunil Goutham4863dea2015-05-26 19:20:15 -07001392 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1393 payload_len = rb_lens[frag_num(frag)];
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301394 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1395 if (!phys_addr) {
1396 if (skb)
1397 dev_kfree_skb_any(skb);
1398 return NULL;
1399 }
1400
Sunil Goutham4863dea2015-05-26 19:20:15 -07001401 if (!frag) {
1402 /* First fragment */
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301403 dma_unmap_page_attrs(&nic->pdev->dev,
1404 *rb_ptrs - cqe_rx->align_pad,
1405 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1406 DMA_ATTR_SKIP_CPU_SYNC);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001407 skb = nicvf_rb_ptr_to_skb(nic,
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301408 phys_addr - cqe_rx->align_pad,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001409 payload_len);
1410 if (!skb)
1411 return NULL;
1412 skb_reserve(skb, cqe_rx->align_pad);
1413 skb_put(skb, payload_len);
1414 } else {
1415 /* Add fragments */
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301416 dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
1417 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1418 DMA_ATTR_SKIP_CPU_SYNC);
1419 page = virt_to_page(phys_to_virt(phys_addr));
1420 offset = phys_to_virt(phys_addr) - page_address(page);
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301421 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1422 offset, payload_len, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001423 }
1424 /* Next buffer pointer */
1425 rb_ptrs++;
1426 }
1427 return skb;
1428}
1429
Yury Norovb45ceb42015-12-07 10:30:32 +05301430static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001431{
1432 u64 reg_val;
1433
Sunil Goutham4863dea2015-05-26 19:20:15 -07001434 switch (int_type) {
1435 case NICVF_INTR_CQ:
1436 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1437 break;
1438 case NICVF_INTR_SQ:
1439 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1440 break;
1441 case NICVF_INTR_RBDR:
1442 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1443 break;
1444 case NICVF_INTR_PKT_DROP:
1445 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1446 break;
1447 case NICVF_INTR_TCP_TIMER:
1448 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1449 break;
1450 case NICVF_INTR_MBOX:
1451 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1452 break;
1453 case NICVF_INTR_QS_ERR:
Yury Norovb45ceb42015-12-07 10:30:32 +05301454 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001455 break;
1456 default:
Yury Norovb45ceb42015-12-07 10:30:32 +05301457 reg_val = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001458 }
1459
Yury Norovb45ceb42015-12-07 10:30:32 +05301460 return reg_val;
1461}
1462
1463/* Enable interrupt */
1464void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1465{
1466 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1467
1468 if (!mask) {
1469 netdev_dbg(nic->netdev,
1470 "Failed to enable interrupt: unknown type\n");
1471 return;
1472 }
1473 nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1474 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1475}
1476
1477/* Disable interrupt */
1478void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1479{
1480 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1481
1482 if (!mask) {
1483 netdev_dbg(nic->netdev,
1484 "Failed to disable interrupt: unknown type\n");
1485 return;
1486 }
1487
1488 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1489}
1490
1491/* Clear interrupt */
1492void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1493{
1494 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1495
1496 if (!mask) {
1497 netdev_dbg(nic->netdev,
1498 "Failed to clear interrupt: unknown type\n");
1499 return;
1500 }
1501
1502 nicvf_reg_write(nic, NIC_VF_INT, mask);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001503}
1504
1505/* Check if interrupt is enabled */
1506int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1507{
Yury Norovb45ceb42015-12-07 10:30:32 +05301508 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1509 /* If interrupt type is unknown, we treat it disabled. */
1510 if (!mask) {
1511 netdev_dbg(nic->netdev,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001512 "Failed to check interrupt enable: unknown type\n");
Yury Norovb45ceb42015-12-07 10:30:32 +05301513 return 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001514 }
1515
Yury Norovb45ceb42015-12-07 10:30:32 +05301516 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001517}
1518
1519void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1520{
1521 struct rcv_queue *rq;
1522
1523#define GET_RQ_STATS(reg) \
1524 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1525 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1526
1527 rq = &nic->qs->rq[rq_idx];
1528 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1529 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1530}
1531
1532void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1533{
1534 struct snd_queue *sq;
1535
1536#define GET_SQ_STATS(reg) \
1537 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1538 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1539
1540 sq = &nic->qs->sq[sq_idx];
1541 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1542 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1543}
1544
1545/* Check for errors in the receive cmp.queue entry */
Sunil Gouthamad2eceb2016-02-16 16:29:51 +05301546int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001547{
Sunil Gouthamad2eceb2016-02-16 16:29:51 +05301548 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001549 return 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001550
1551 if (netif_msg_rx_err(nic))
1552 netdev_err(nic->netdev,
1553 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1554 nic->netdev->name,
1555 cqe_rx->err_level, cqe_rx->err_opcode);
1556
Sunil Goutham4863dea2015-05-26 19:20:15 -07001557 switch (cqe_rx->err_opcode) {
1558 case CQ_RX_ERROP_RE_PARTIAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301559 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001560 break;
1561 case CQ_RX_ERROP_RE_JABBER:
Sunil Goutham964cb692016-11-15 17:38:16 +05301562 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001563 break;
1564 case CQ_RX_ERROP_RE_FCS:
Sunil Goutham964cb692016-11-15 17:38:16 +05301565 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001566 break;
1567 case CQ_RX_ERROP_RE_RX_CTL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301568 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001569 break;
1570 case CQ_RX_ERROP_PREL2_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301571 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001572 break;
1573 case CQ_RX_ERROP_L2_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301574 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001575 break;
1576 case CQ_RX_ERROP_L2_OVERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301577 this_cpu_inc(nic->drv_stats->rx_oversize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001578 break;
1579 case CQ_RX_ERROP_L2_UNDERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301580 this_cpu_inc(nic->drv_stats->rx_undersize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001581 break;
1582 case CQ_RX_ERROP_L2_LENMISM:
Sunil Goutham964cb692016-11-15 17:38:16 +05301583 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001584 break;
1585 case CQ_RX_ERROP_L2_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301586 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001587 break;
1588 case CQ_RX_ERROP_IP_NOT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301589 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001590 break;
1591 case CQ_RX_ERROP_IP_CSUM_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301592 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001593 break;
1594 case CQ_RX_ERROP_IP_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301595 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001596 break;
1597 case CQ_RX_ERROP_IP_MALD:
Sunil Goutham964cb692016-11-15 17:38:16 +05301598 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001599 break;
1600 case CQ_RX_ERROP_IP_HOP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301601 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001602 break;
1603 case CQ_RX_ERROP_L3_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301604 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001605 break;
1606 case CQ_RX_ERROP_L4_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301607 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001608 break;
1609 case CQ_RX_ERROP_L4_CHK:
Sunil Goutham964cb692016-11-15 17:38:16 +05301610 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001611 break;
1612 case CQ_RX_ERROP_UDP_LEN:
Sunil Goutham964cb692016-11-15 17:38:16 +05301613 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001614 break;
1615 case CQ_RX_ERROP_L4_PORT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301616 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001617 break;
1618 case CQ_RX_ERROP_TCP_FLAG:
Sunil Goutham964cb692016-11-15 17:38:16 +05301619 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001620 break;
1621 case CQ_RX_ERROP_TCP_OFFSET:
Sunil Goutham964cb692016-11-15 17:38:16 +05301622 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001623 break;
1624 case CQ_RX_ERROP_L4_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301625 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001626 break;
1627 case CQ_RX_ERROP_RBDR_TRUNC:
Sunil Goutham964cb692016-11-15 17:38:16 +05301628 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001629 break;
1630 }
1631
1632 return 1;
1633}
1634
1635/* Check for errors in the send cmp.queue entry */
Sunil Goutham964cb692016-11-15 17:38:16 +05301636int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001637{
Sunil Goutham4863dea2015-05-26 19:20:15 -07001638 switch (cqe_tx->send_status) {
1639 case CQ_TX_ERROP_GOOD:
Sunil Goutham4863dea2015-05-26 19:20:15 -07001640 return 0;
1641 case CQ_TX_ERROP_DESC_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301642 this_cpu_inc(nic->drv_stats->tx_desc_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001643 break;
1644 case CQ_TX_ERROP_HDR_CONS_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301645 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001646 break;
1647 case CQ_TX_ERROP_SUBDC_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301648 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001649 break;
Sunil Goutham712c3182016-11-15 17:37:36 +05301650 case CQ_TX_ERROP_MAX_SIZE_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301651 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
Sunil Goutham712c3182016-11-15 17:37:36 +05301652 break;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001653 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301654 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001655 break;
1656 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301657 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001658 break;
1659 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301660 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001661 break;
1662 case CQ_TX_ERROP_LOCK_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301663 this_cpu_inc(nic->drv_stats->tx_lock_viol);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001664 break;
1665 case CQ_TX_ERROP_DATA_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301666 this_cpu_inc(nic->drv_stats->tx_data_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001667 break;
1668 case CQ_TX_ERROP_TSTMP_CONFLICT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301669 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001670 break;
1671 case CQ_TX_ERROP_TSTMP_TIMEOUT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301672 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001673 break;
1674 case CQ_TX_ERROP_MEM_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301675 this_cpu_inc(nic->drv_stats->tx_mem_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001676 break;
1677 case CQ_TX_ERROP_CK_OVERLAP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301678 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001679 break;
1680 case CQ_TX_ERROP_CK_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301681 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001682 break;
1683 }
1684
1685 return 1;
1686}