Sunil Goutham4863dea2015-05-26 19:20:15 -07001/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9#include <linux/pci.h>
10#include <linux/netdevice.h>
11#include <linux/ip.h>
12#include <linux/etherdevice.h>
Sunil Goutham83abb7d2017-03-07 18:09:08 +053013#include <linux/iommu.h>
Sunil Goutham4863dea2015-05-26 19:20:15 -070014#include <net/ip.h>
15#include <net/tso.h>
16
17#include "nic_reg.h"
18#include "nic.h"
19#include "q_struct.h"
20#include "nicvf_queues.h"
21
Sunil Goutham16f2bcc2017-05-02 18:36:56 +053022static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
23 int size, u64 data);
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053024static void nicvf_get_page(struct nicvf *nic)
25{
26 if (!nic->rb_pageref || !nic->rb_page)
27 return;
28
Joonsoo Kim6d061f92016-05-19 17:10:46 -070029 page_ref_add(nic->rb_page, nic->rb_pageref);
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053030 nic->rb_pageref = 0;
31}
32
Sunil Goutham4863dea2015-05-26 19:20:15 -070033/* Poll a register for a specific value */
34static int nicvf_poll_reg(struct nicvf *nic, int qidx,
35 u64 reg, int bit_pos, int bits, int val)
36{
37 u64 bit_mask;
38 u64 reg_val;
39 int timeout = 10;
40
41 bit_mask = (1ULL << bits) - 1;
42 bit_mask = (bit_mask << bit_pos);
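	/* Example (grounded in the callers below): bits = 2, bit_pos = 62,
	 * as used for the RBDR FIFO state polls, gives bit_mask = 0x3ULL << 62,
	 * so only those two register bits are compared against 'val'.
	 */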
43
44 while (timeout) {
45 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
46 if (((reg_val & bit_mask) >> bit_pos) == val)
47 return 0;
48 usleep_range(1000, 2000);
49 timeout--;
50 }
51 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
52 return 1;
53}
54
55/* Allocate memory for a queue's descriptors */
56static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
57 int q_len, int desc_size, int align_bytes)
58{
59 dmem->q_len = q_len;
60 dmem->size = (desc_size * q_len) + align_bytes;
61 /* Save address, need it while freeing */
62 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
63 &dmem->dma, GFP_KERNEL);
64 if (!dmem->unalign_base)
65 return -ENOMEM;
66
67 /* Align memory address for 'align_bytes' */
68 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
Aleksey Makarov39a0dd02015-06-02 11:00:25 -070069 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
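	/* The virtual base is advanced by the same number of bytes that
	 * alignment added to the DMA address, so 'base' and 'phys_base'
	 * refer to the same (aligned) descriptor memory.
	 */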
Sunil Goutham4863dea2015-05-26 19:20:15 -070070 return 0;
71}
72
73/* Free queue's descriptor memory */
74static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
75{
76 if (!dmem)
77 return;
78
79 dma_free_coherent(&nic->pdev->dev, dmem->size,
80 dmem->unalign_base, dmem->dma);
81 dmem->unalign_base = NULL;
82 dmem->base = NULL;
83}
84
Sunil Goutham77322532017-05-02 18:36:58 +053085#define XDP_PAGE_REFCNT_REFILL 256
86
Sunil Goutham5836b442017-05-02 18:36:50 +053087/* Allocate a new page or recycle one if possible
88 *
89 * We cannot optimize dma mapping here, since
90 * 1. There is only one RBDR ring for 8 Rx queues.
91 * 2. CQE_RX gives the address of the buffer the pkt was DMA'ed to,
92 * not an index into the RBDR ring, so we can't refer to saved info.
93 * 3. There are multiple receive buffers per page.
Sunil Goutham4863dea2015-05-26 19:20:15 -070094 */
Sunil Goutham77322532017-05-02 18:36:58 +053095static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
96 struct rbdr *rbdr, gfp_t gfp)
Sunil Goutham4863dea2015-05-26 19:20:15 -070097{
Sunil Goutham77322532017-05-02 18:36:58 +053098 int ref_count;
Sunil Goutham5836b442017-05-02 18:36:50 +053099 struct page *page = NULL;
100 struct pgcache *pgcache, *next;
101
102 /* Check if page is already allocated */
103 pgcache = &rbdr->pgcache[rbdr->pgidx];
104 page = pgcache->page;
105 /* Check if page can be recycled */
Sunil Goutham77322532017-05-02 18:36:58 +0530106 if (page) {
107 ref_count = page_ref_count(page);
108 /* Check if this page has been used once, i.e. 'put_page' was
109 * called after packet transmission, so the internal ref_count
110 * and the page's ref_count are equal and the page can be recycled.
111 */
112 if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
113 pgcache->ref_count--;
114 else
115 page = NULL;
116
117 /* In non-XDP mode, page's ref_count needs to be '1' for it
118 * to be recycled.
119 */
120 if (!rbdr->is_xdp && (ref_count != 1))
121 page = NULL;
122 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530123
124 if (!page) {
125 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
126 if (!page)
127 return NULL;
128
129 this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
130
131 /* Check for space in the page cache */
132 if (rbdr->pgalloc >= rbdr->pgcnt) {
133 /* Page can still be used */
134 nic->rb_page = page;
135 return NULL;
136 }
137
138 /* Save the page in page cache */
139 pgcache->page = page;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530140 pgcache->dma_addr = 0;
Sunil Goutham77322532017-05-02 18:36:58 +0530141 pgcache->ref_count = 0;
Sunil Goutham5836b442017-05-02 18:36:50 +0530142 rbdr->pgalloc++;
143 }
144
Sunil Goutham77322532017-05-02 18:36:58 +0530145 /* Take additional page references for recycling */
146 if (rbdr->is_xdp) {
147 /* Since there is a single RBDR (i.e. a single core doing
148 * page recycling) per 8 Rx queues, in XDP mode adjusting
149 * page references atomically is the biggest bottleneck, so
150 * take a bunch of references at a time.
151 *
152 * Hence the reference counts kept here differ by '1'.
153 */
154 if (!pgcache->ref_count) {
155 pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
156 page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
157 }
158 } else {
159 /* In the non-XDP case, a single 64K page is divided across multiple
160 * receive buffers, so the cost of recycling is lower anyway and
161 * a single extra reference is enough.
162 */
163 page_ref_add(page, 1);
164 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530165
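	/* pgcnt was rounded up to a power of two at init time, so masking
	 * with (pgcnt - 1) below wraps pgidx around the page cache like a
	 * ring index.
	 */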
166 rbdr->pgidx++;
167 rbdr->pgidx &= (rbdr->pgcnt - 1);
168
169 /* Prefetch refcount of next page in page cache */
170 next = &rbdr->pgcache[rbdr->pgidx];
171 page = next->page;
172 if (page)
173 prefetch(&page->_refcount);
174
175 return pgcache;
176}
177
178/* Allocate buffer for packet reception */
179static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
Sunil Goutham927987f2017-05-02 18:36:53 +0530180 gfp_t gfp, u32 buf_len, u64 *rbuf)
Sunil Goutham5836b442017-05-02 18:36:50 +0530181{
182 struct pgcache *pgcache = NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700183
Sunil Goutham05c773f2017-05-02 18:36:54 +0530184 /* Check if request can be accommodated in the previously allocated page.
185 * But in XDP mode only one buffer per page is permitted.
186 */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530187 if (!rbdr->is_xdp && nic->rb_page &&
Sunil Goutham5836b442017-05-02 18:36:50 +0530188 ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530189 nic->rb_pageref++;
190 goto ret;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700191 }
192
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530193 nicvf_get_page(nic);
Sunil Goutham5836b442017-05-02 18:36:50 +0530194 nic->rb_page = NULL;
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530195
Sunil Goutham5836b442017-05-02 18:36:50 +0530196 /* Get new page, either recycled or new one */
197 pgcache = nicvf_alloc_page(nic, rbdr, gfp);
198 if (!pgcache && !nic->rb_page) {
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530199 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
200 return -ENOMEM;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700201 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530202
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530203 nic->rb_page_offset = 0;
Sunil Gouthame3d06ff2017-05-02 18:36:57 +0530204
205 /* Reserve space for header modifications by BPF program */
206 if (rbdr->is_xdp)
Sunil Gouthamaa136d02017-11-24 15:03:26 +0300207 buf_len += XDP_HEADROOM;
Sunil Gouthame3d06ff2017-05-02 18:36:57 +0530208
Sunil Goutham5836b442017-05-02 18:36:50 +0530209 /* Check if it's recycled */
210 if (pgcache)
211 nic->rb_page = pgcache->page;
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530212ret:
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530213 if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
214 *rbuf = pgcache->dma_addr;
215 } else {
216 /* HW will ensure data coherency, CPU sync not required */
217 *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
218 nic->rb_page_offset, buf_len,
219 DMA_FROM_DEVICE,
220 DMA_ATTR_SKIP_CPU_SYNC);
221 if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
222 if (!nic->rb_page_offset)
223 __free_pages(nic->rb_page, 0);
224 nic->rb_page = NULL;
225 return -ENOMEM;
226 }
Sunil Gouthamaa136d02017-11-24 15:03:26 +0300227
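	/* Cache the DMA address so that, in XDP mode, a recycled buffer
	 * from this page can be handed out again (see the is_xdp check
	 * above) without being remapped.
	 */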
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530228 if (pgcache)
Sunil Gouthamaa136d02017-11-24 15:03:26 +0300229 pgcache->dma_addr = *rbuf + XDP_HEADROOM;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530230 nic->rb_page_offset += buf_len;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530231 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700232
Sunil Goutham4863dea2015-05-26 19:20:15 -0700233 return 0;
234}
235
Sunil Goutham668dda02015-12-07 10:30:33 +0530236/* Build skb around receive buffer */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700237static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
238 u64 rb_ptr, int len)
239{
Sunil Goutham668dda02015-12-07 10:30:33 +0530240 void *data;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700241 struct sk_buff *skb;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700242
Sunil Goutham668dda02015-12-07 10:30:33 +0530243 data = phys_to_virt(rb_ptr);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700244
245 /* Now build an skb to give to stack */
Sunil Goutham668dda02015-12-07 10:30:33 +0530246 skb = build_skb(data, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700247 if (!skb) {
Sunil Goutham668dda02015-12-07 10:30:33 +0530248 put_page(virt_to_page(data));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700249 return NULL;
250 }
251
Sunil Goutham668dda02015-12-07 10:30:33 +0530252 prefetch(skb->data);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700253 return skb;
254}
255
256/* Allocate RBDR ring and populate receive buffers */
257static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
258 int ring_len, int buf_size)
259{
260 int idx;
Sunil Goutham927987f2017-05-02 18:36:53 +0530261 u64 rbuf;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700262 struct rbdr_entry_t *desc;
263 int err;
264
265 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
266 sizeof(struct rbdr_entry_t),
267 NICVF_RCV_BUF_ALIGN_BYTES);
268 if (err)
269 return err;
270
271 rbdr->desc = rbdr->dmem.base;
272 /* Buffer size has to be in multiples of 128 bytes */
273 rbdr->dma_size = buf_size;
274 rbdr->enable = true;
275 rbdr->thresh = RBDR_THRESH;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530276 rbdr->head = 0;
277 rbdr->tail = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700278
Sunil Goutham5836b442017-05-02 18:36:50 +0530279 /* Initialize page recycling stuff.
280 *
281 * Can't use a single buffer per page, especially with 64K pages.
282 * On embedded platforms, i.e. 81xx/83xx, available memory itself
283 * is low and the minimum RBDR ring size is 8K, which takes away
284 * lots of memory.
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530285 *
286 * But for XDP it has to be a single buffer per page.
Sunil Goutham5836b442017-05-02 18:36:50 +0530287 */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530288 if (!nic->pnicvf->xdp_prog) {
289 rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
290 rbdr->is_xdp = false;
291 } else {
292 rbdr->pgcnt = ring_len;
293 rbdr->is_xdp = true;
294 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530295 rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
296 rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
297 rbdr->pgcnt, GFP_KERNEL);
298 if (!rbdr->pgcache)
299 return -ENOMEM;
300 rbdr->pgidx = 0;
301 rbdr->pgalloc = 0;
302
Sunil Goutham4863dea2015-05-26 19:20:15 -0700303 nic->rb_page = NULL;
304 for (idx = 0; idx < ring_len; idx++) {
Sunil Goutham5836b442017-05-02 18:36:50 +0530305 err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
306 RCV_FRAG_LEN, &rbuf);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530307 if (err) {
308 /* To free already allocated and mapped ones */
309 rbdr->tail = idx - 1;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700310 return err;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530311 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700312
313 desc = GET_RBDR_DESC(rbdr, idx);
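	/* Strip the low-order alignment bits (below NICVF_RCV_BUF_ALIGN_BYTES)
	 * before writing the buffer address into the RBDR descriptor.
	 */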
Sunil Goutham927987f2017-05-02 18:36:53 +0530314 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700315 }
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530316
317 nicvf_get_page(nic);
318
Sunil Goutham4863dea2015-05-26 19:20:15 -0700319 return 0;
320}
321
322/* Free RBDR ring and its receive buffers */
323static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
324{
325 int head, tail;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530326 u64 buf_addr, phys_addr;
Sunil Goutham5836b442017-05-02 18:36:50 +0530327 struct pgcache *pgcache;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700328 struct rbdr_entry_t *desc;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700329
330 if (!rbdr)
331 return;
332
333 rbdr->enable = false;
334 if (!rbdr->dmem.base)
335 return;
336
337 head = rbdr->head;
338 tail = rbdr->tail;
339
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530340 /* Release page references */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700341 while (head != tail) {
342 desc = GET_RBDR_DESC(rbdr, head);
Sunil Goutham5e848e42017-05-02 18:36:51 +0530343 buf_addr = desc->buf_addr;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530344 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
345 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
346 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
347 if (phys_addr)
348 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700349 head++;
350 head &= (rbdr->dmem.q_len - 1);
351 }
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530352 /* Release buffer of tail desc */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700353 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham5e848e42017-05-02 18:36:51 +0530354 buf_addr = desc->buf_addr;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530355 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
356 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
357 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
358 if (phys_addr)
359 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700360
Sunil Goutham5836b442017-05-02 18:36:50 +0530361 /* Sync page cache info */
362 smp_rmb();
363
364 /* Release additional page references held for recycling */
365 head = 0;
366 while (head < rbdr->pgcnt) {
367 pgcache = &rbdr->pgcache[head];
Sunil Goutham77322532017-05-02 18:36:58 +0530368 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
369 if (!rbdr->is_xdp) {
370 put_page(pgcache->page);
371 continue;
372 }
373 page_ref_sub(pgcache->page, pgcache->ref_count - 1);
Sunil Goutham5836b442017-05-02 18:36:50 +0530374 put_page(pgcache->page);
Sunil Goutham77322532017-05-02 18:36:58 +0530375 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530376 head++;
377 }
378
Sunil Goutham4863dea2015-05-26 19:20:15 -0700379 /* Free RBDR ring */
380 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
381}
382
383/* Refill receive buffer descriptors with new buffers.
384 */
Aleksey Makarovfd7ec062015-06-02 11:00:23 -0700385static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700386{
387 struct queue_set *qs = nic->qs;
388 int rbdr_idx = qs->rbdr_cnt;
389 int tail, qcount;
390 int refill_rb_cnt;
391 struct rbdr *rbdr;
392 struct rbdr_entry_t *desc;
Sunil Goutham927987f2017-05-02 18:36:53 +0530393 u64 rbuf;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700394 int new_rb = 0;
395
396refill:
397 if (!rbdr_idx)
398 return;
399 rbdr_idx--;
400 rbdr = &qs->rbdr[rbdr_idx];
401 /* Check if it's enabled */
402 if (!rbdr->enable)
403 goto next_rbdr;
404
405 /* Get no of desc's to be refilled */
406 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
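	/* Only the low 19 bits of STATUS0 (per the 0x7FFFF mask below) carry
	 * the buffer count used here; the rest is masked off.
	 */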
407 qcount &= 0x7FFFF;
408 /* Doorbell can be rung with at most ring size minus 1 */
409 if (qcount >= (qs->rbdr_len - 1))
410 goto next_rbdr;
411 else
412 refill_rb_cnt = qs->rbdr_len - qcount - 1;
413
Sunil Goutham5836b442017-05-02 18:36:50 +0530414 /* Sync page cache info */
415 smp_rmb();
416
Sunil Goutham4863dea2015-05-26 19:20:15 -0700417 /* Start filling descs from tail */
418 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
419 while (refill_rb_cnt) {
420 tail++;
421 tail &= (rbdr->dmem.q_len - 1);
422
Sunil Goutham5836b442017-05-02 18:36:50 +0530423 if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700424 break;
425
426 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham927987f2017-05-02 18:36:53 +0530427 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700428 refill_rb_cnt--;
429 new_rb++;
430 }
431
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530432 nicvf_get_page(nic);
433
Sunil Goutham4863dea2015-05-26 19:20:15 -0700434 /* make sure all memory stores are done before ringing doorbell */
435 smp_wmb();
436
437 /* Check if buffer allocation failed */
438 if (refill_rb_cnt)
439 nic->rb_alloc_fail = true;
440 else
441 nic->rb_alloc_fail = false;
442
443 /* Notify HW */
444 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
445 rbdr_idx, new_rb);
446next_rbdr:
447 /* Re-enable RBDR interrupts only if buffer allocation was successful */
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530448 if (!nic->rb_alloc_fail && rbdr->enable &&
449 netif_running(nic->pnicvf->netdev))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700450 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
451
452 if (rbdr_idx)
453 goto refill;
454}
455
456/* Alloc rcv buffers in non-atomic mode for better success */
457void nicvf_rbdr_work(struct work_struct *work)
458{
459 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
460
461 nicvf_refill_rbdr(nic, GFP_KERNEL);
462 if (nic->rb_alloc_fail)
463 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
464 else
465 nic->rb_work_scheduled = false;
466}
467
468/* In Softirq context, alloc rcv buffers in atomic mode */
469void nicvf_rbdr_task(unsigned long data)
470{
471 struct nicvf *nic = (struct nicvf *)data;
472
473 nicvf_refill_rbdr(nic, GFP_ATOMIC);
474 if (nic->rb_alloc_fail) {
475 nic->rb_work_scheduled = true;
476 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
477 }
478}
479
480/* Initialize completion queue */
481static int nicvf_init_cmp_queue(struct nicvf *nic,
482 struct cmp_queue *cq, int q_len)
483{
484 int err;
485
486 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
487 NICVF_CQ_BASE_ALIGN_BYTES);
488 if (err)
489 return err;
490
491 cq->desc = cq->dmem.base;
Sunil Gouthamb9687b42015-12-10 13:25:20 +0530492 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700493 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
494
495 return 0;
496}
497
498static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
499{
500 if (!cq)
501 return;
502 if (!cq->dmem.base)
503 return;
504
505 nicvf_free_q_desc_mem(nic, &cq->dmem);
506}
507
508/* Initialize transmit queue */
509static int nicvf_init_snd_queue(struct nicvf *nic,
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530510 struct snd_queue *sq, int q_len, int qidx)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700511{
512 int err;
513
514 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
515 NICVF_SQ_BASE_ALIGN_BYTES);
516 if (err)
517 return err;
518
519 sq->desc = sq->dmem.base;
Aleksey Makarov86ace692015-06-02 11:00:27 -0700520 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
Aleksey Makarovfa1a6c92015-06-02 11:00:26 -0700521 if (!sq->skbuff)
522 return -ENOMEM;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530523
Sunil Goutham4863dea2015-05-26 19:20:15 -0700524 sq->head = 0;
525 sq->tail = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700526 sq->thresh = SND_QUEUE_THRESH;
527
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530528 /* Check if this SQ is an XDP TX queue */
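	/* On secondary Qsets, first translate the local queue index into a
	 * global one so it can be compared against the primary VF's
	 * xdp_tx_queues count below.
	 */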
529 if (nic->sqs_mode)
530 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
531 if (qidx < nic->pnicvf->xdp_tx_queues) {
532 /* Alloc memory to save page pointers for XDP_TX */
533 sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
534 if (!sq->xdp_page)
535 return -ENOMEM;
536 sq->xdp_desc_cnt = 0;
537 sq->xdp_free_cnt = q_len - 1;
538 sq->is_xdp = true;
539 } else {
540 sq->xdp_page = NULL;
541 sq->xdp_desc_cnt = 0;
542 sq->xdp_free_cnt = 0;
543 sq->is_xdp = false;
544
545 atomic_set(&sq->free_cnt, q_len - 1);
546
547 /* Preallocate memory for TSO segment's header */
548 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
549 q_len * TSO_HEADER_SIZE,
550 &sq->tso_hdrs_phys,
551 GFP_KERNEL);
552 if (!sq->tso_hdrs)
553 return -ENOMEM;
554 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700555
556 return 0;
557}
558
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530559void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
560 int hdr_sqe, u8 subdesc_cnt)
561{
562 u8 idx;
563 struct sq_gather_subdesc *gather;
564
565 /* Unmap DMA mapped skb data buffers */
566 for (idx = 0; idx < subdesc_cnt; idx++) {
567 hdr_sqe++;
568 hdr_sqe &= (sq->dmem.q_len - 1);
569 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
570 /* HW will ensure data coherency, CPU sync not required */
571 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
572 gather->size, DMA_TO_DEVICE,
573 DMA_ATTR_SKIP_CPU_SYNC);
574 }
575}
576
Sunil Goutham4863dea2015-05-26 19:20:15 -0700577static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
578{
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530579 struct sk_buff *skb;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530580 struct page *page;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530581 struct sq_hdr_subdesc *hdr;
582 struct sq_hdr_subdesc *tso_sqe;
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530583
Sunil Goutham4863dea2015-05-26 19:20:15 -0700584 if (!sq)
585 return;
586 if (!sq->dmem.base)
587 return;
588
589 if (sq->tso_hdrs)
Sunil Goutham143ceb02015-07-29 16:49:37 +0300590 dma_free_coherent(&nic->pdev->dev,
591 sq->dmem.q_len * TSO_HEADER_SIZE,
Sunil Goutham4863dea2015-05-26 19:20:15 -0700592 sq->tso_hdrs, sq->tso_hdrs_phys);
593
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530594 /* Free pending skbs in the queue */
595 smp_rmb();
596 while (sq->head != sq->tail) {
597 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530598 if (!skb || !sq->xdp_page)
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530599 goto next;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530600
601 page = (struct page *)sq->xdp_page[sq->head];
602 if (!page)
603 goto next;
604 else
605 put_page(page);
606
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530607 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
608 /* Check for dummy descriptor used for HW TSO offload on 88xx */
609 if (hdr->dont_send) {
610 /* Get actual TSO descriptors and unmap them */
611 tso_sqe =
612 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
613 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
614 tso_sqe->subdesc_cnt);
615 } else {
616 nicvf_unmap_sndq_buffers(nic, sq, sq->head,
617 hdr->subdesc_cnt);
618 }
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530619 if (skb)
620 dev_kfree_skb_any(skb);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530621next:
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530622 sq->head++;
623 sq->head &= (sq->dmem.q_len - 1);
624 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700625 kfree(sq->skbuff);
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530626 kfree(sq->xdp_page);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700627 nicvf_free_q_desc_mem(nic, &sq->dmem);
628}
629
630static void nicvf_reclaim_snd_queue(struct nicvf *nic,
631 struct queue_set *qs, int qidx)
632{
633 /* Disable send queue */
634 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
635 /* Check if SQ is stopped */
636 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
637 return;
638 /* Reset send queue */
639 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
640}
641
642static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
643 struct queue_set *qs, int qidx)
644{
645 union nic_mbx mbx = {};
646
647 /* Make sure all packets in the pipeline are written back into mem */
648 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
649 nicvf_send_msg_to_pf(nic, &mbx);
650}
651
652static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
653 struct queue_set *qs, int qidx)
654{
655 /* Disable timer threshold (doesn't get reset upon CQ reset) */
656 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
657 /* Disable completion queue */
658 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
659 /* Reset completion queue */
660 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
661}
662
663static void nicvf_reclaim_rbdr(struct nicvf *nic,
664 struct rbdr *rbdr, int qidx)
665{
666 u64 tmp, fifo_state;
667 int timeout = 10;
668
669 /* Save head and tail pointers for freeing up buffers */
670 rbdr->head = nicvf_queue_reg_read(nic,
671 NIC_QSET_RBDR_0_1_HEAD,
672 qidx) >> 3;
673 rbdr->tail = nicvf_queue_reg_read(nic,
674 NIC_QSET_RBDR_0_1_TAIL,
675 qidx) >> 3;
676
677 /* If RBDR FIFO is in 'FAIL' state then do a reset first
678 * before reclaiming.
679 */
680 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
681 if (((fifo_state >> 62) & 0x03) == 0x3)
682 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
683 qidx, NICVF_RBDR_RESET);
684
685 /* Disable RBDR */
686 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
687 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
688 return;
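	/* Wait for the two 32-bit halves of the prefetch status register to
	 * match, which this driver treats as "no buffer prefetch outstanding",
	 * before resetting the RBDR.
	 */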
689 while (1) {
690 tmp = nicvf_queue_reg_read(nic,
691 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
692 qidx);
693 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
694 break;
695 usleep_range(1000, 2000);
696 timeout--;
697 if (!timeout) {
698 netdev_err(nic->netdev,
699 "Failed polling on prefetch status\n");
700 return;
701 }
702 }
703 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
704 qidx, NICVF_RBDR_RESET);
705
706 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
707 return;
708 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
709 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
710 return;
711}
712
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300713void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
714{
715 u64 rq_cfg;
716 int sqs;
717
718 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
719
720 /* Enable first VLAN stripping */
721 if (features & NETIF_F_HW_VLAN_CTAG_RX)
722 rq_cfg |= (1ULL << 25);
723 else
724 rq_cfg &= ~(1ULL << 25);
725 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
726
727 /* Configure Secondary Qsets, if any */
728 for (sqs = 0; sqs < nic->sqs_count; sqs++)
729 if (nic->snicvf[sqs])
730 nicvf_queue_reg_write(nic->snicvf[sqs],
731 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
732}
733
Jerin Jacob3458c402016-08-12 16:51:39 +0530734static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
735{
736 union nic_mbx mbx = {};
737
Sunil Goutham964cb692016-11-15 17:38:16 +0530738 /* Reset all RQ/SQ and VF stats */
Jerin Jacob3458c402016-08-12 16:51:39 +0530739 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
Sunil Goutham964cb692016-11-15 17:38:16 +0530740 mbx.reset_stat.rx_stat_mask = 0x3FFF;
741 mbx.reset_stat.tx_stat_mask = 0x1F;
Jerin Jacob3458c402016-08-12 16:51:39 +0530742 mbx.reset_stat.rq_stat_mask = 0xFFFF;
Sunil Goutham964cb692016-11-15 17:38:16 +0530743 mbx.reset_stat.sq_stat_mask = 0xFFFF;
Jerin Jacob3458c402016-08-12 16:51:39 +0530744 nicvf_send_msg_to_pf(nic, &mbx);
745}
746
Sunil Goutham4863dea2015-05-26 19:20:15 -0700747/* Configures receive queue */
748static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
749 int qidx, bool enable)
750{
751 union nic_mbx mbx = {};
752 struct rcv_queue *rq;
753 struct rq_cfg rq_cfg;
754
755 rq = &qs->rq[qidx];
756 rq->enable = enable;
757
758 /* Disable receive queue */
759 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
760
761 if (!rq->enable) {
762 nicvf_reclaim_rcv_queue(nic, qs, qidx);
Jesper Dangaard Brouer27e95e32018-01-03 11:25:54 +0100763 xdp_rxq_info_unreg(&rq->xdp_rxq);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700764 return;
765 }
766
767 rq->cq_qs = qs->vnic_id;
768 rq->cq_idx = qidx;
769 rq->start_rbdr_qs = qs->vnic_id;
770 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
771 rq->cont_rbdr_qs = qs->vnic_id;
772 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
773 /* all writes of RBDR data to be loaded into L2 cache as well */
774 rq->caching = 1;
775
Jesper Dangaard Brouer27e95e32018-01-03 11:25:54 +0100776 /* Driver has no proper error path for a failed XDP RX-queue info reg */
777 WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
778
Sunil Goutham4863dea2015-05-26 19:20:15 -0700779 /* Send a mailbox msg to PF to config RQ */
780 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
781 mbx.rq.qs_num = qs->vnic_id;
782 mbx.rq.rq_num = qidx;
783 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
784 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
785 (rq->cont_qs_rbdr_idx << 8) |
786 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
787 nicvf_send_msg_to_pf(nic, &mbx);
788
789 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530790 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
791 (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
792 (qs->vnic_id << 0);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700793 nicvf_send_msg_to_pf(nic, &mbx);
794
795 /* RQ drop config
796 * Enable CQ drop to reserve sufficient CQEs for all tx packets
797 */
798 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530799 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
800 (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
801 (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700802 nicvf_send_msg_to_pf(nic, &mbx);
803
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530804 if (!nic->sqs_mode && (qidx == 0)) {
Thanneeru Srinivasulu36fa35d2017-03-07 18:09:11 +0530805 /* Enable checking of L3/L4 length and TCP/UDP checksums.
806 * Also allow IPv6 pkts with zero UDP checksum.
807 */
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530808 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
Thanneeru Srinivasulu36fa35d2017-03-07 18:09:11 +0530809 (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300810 nicvf_config_vlan_stripping(nic, nic->netdev->features);
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530811 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700812
813 /* Enable Receive queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200814 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700815 rq_cfg.ena = 1;
816 rq_cfg.tcp_ena = 0;
817 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
818}
819
820/* Configures completion queue */
821void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
822 int qidx, bool enable)
823{
824 struct cmp_queue *cq;
825 struct cq_cfg cq_cfg;
826
827 cq = &qs->cq[qidx];
828 cq->enable = enable;
829
830 if (!cq->enable) {
831 nicvf_reclaim_cmp_queue(nic, qs, qidx);
832 return;
833 }
834
835 /* Reset completion queue */
836 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
837
838 if (!cq->enable)
839 return;
840
841 spin_lock_init(&cq->lock);
842 /* Set completion queue base address */
843 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
844 qidx, (u64)(cq->dmem.phys_base));
845
846 /* Enable Completion queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200847 memset(&cq_cfg, 0, sizeof(struct cq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700848 cq_cfg.ena = 1;
849 cq_cfg.reset = 0;
850 cq_cfg.caching = 0;
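	/* Per the formula below, the ring length is programmed as
	 * 1K << qsize entries, e.g. a 4K-entry CQ yields qsize = 2.
	 */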
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530851 cq_cfg.qsize = ilog2(qs->cq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700852 cq_cfg.avg_con = 0;
853 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
854
855 /* Set threshold value for interrupt generation */
856 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
857 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
Sunil Goutham006394a2015-12-02 15:36:15 +0530858 qidx, CMP_QUEUE_TIMER_THRESH);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700859}
860
861/* Configures transmit queue */
862static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
863 int qidx, bool enable)
864{
865 union nic_mbx mbx = {};
866 struct snd_queue *sq;
867 struct sq_cfg sq_cfg;
868
869 sq = &qs->sq[qidx];
870 sq->enable = enable;
871
872 if (!sq->enable) {
873 nicvf_reclaim_snd_queue(nic, qs, qidx);
874 return;
875 }
876
877 /* Reset send queue */
878 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
879
880 sq->cq_qs = qs->vnic_id;
881 sq->cq_idx = qidx;
882
883 /* Send a mailbox msg to PF to config SQ */
884 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
885 mbx.sq.qs_num = qs->vnic_id;
886 mbx.sq.sq_num = qidx;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300887 mbx.sq.sqs_mode = nic->sqs_mode;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700888 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
889 nicvf_send_msg_to_pf(nic, &mbx);
890
891 /* Set queue base address */
892 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
893 qidx, (u64)(sq->dmem.phys_base));
894
895 /* Enable send queue & set queue size */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200896 memset(&sq_cfg, 0, sizeof(struct sq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700897 sq_cfg.ena = 1;
898 sq_cfg.reset = 0;
899 sq_cfg.ldwb = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530900 sq_cfg.qsize = ilog2(qs->sq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700901 sq_cfg.tstmp_bgx_intf = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530902 /* CQ's level at which HW will stop processing SQEs to avoid
903 * transmitting a pkt with no space in CQ to post CQE_TX.
904 */
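	/* Per the formula below, cq_limit expresses the reserved headroom
	 * (CMP_QUEUE_PIPELINE_RSVD entries) in 1/256ths of the CQ size.
	 */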
905 sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700906 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
907
908 /* Set threshold value for interrupt generation */
909 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
910
911 /* Set queue:cpu affinity for better load distribution */
912 if (cpu_online(qidx)) {
913 cpumask_set_cpu(qidx, &sq->affinity_mask);
914 netif_set_xps_queue(nic->netdev,
915 &sq->affinity_mask, qidx);
916 }
917}
918
919/* Configures receive buffer descriptor ring */
920static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
921 int qidx, bool enable)
922{
923 struct rbdr *rbdr;
924 struct rbdr_cfg rbdr_cfg;
925
926 rbdr = &qs->rbdr[qidx];
927 nicvf_reclaim_rbdr(nic, rbdr, qidx);
928 if (!enable)
929 return;
930
931 /* Set descriptor base address */
932 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
933 qidx, (u64)(rbdr->dmem.phys_base));
934
935 /* Enable RBDR & set queue size */
936 /* Buffer size should be in multiples of 128 bytes */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200937 memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700938 rbdr_cfg.ena = 1;
939 rbdr_cfg.reset = 0;
940 rbdr_cfg.ldwb = 0;
941 rbdr_cfg.qsize = RBDR_SIZE;
942 rbdr_cfg.avg_con = 0;
943 rbdr_cfg.lines = rbdr->dma_size / 128;
944 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
945 qidx, *(u64 *)&rbdr_cfg);
946
947 /* Notify HW */
948 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
949 qidx, qs->rbdr_len - 1);
950
951 /* Set threshold value for interrupt generation */
952 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
953 qidx, rbdr->thresh - 1);
954}
955
956/* Requests PF to assign and enable Qset */
957void nicvf_qset_config(struct nicvf *nic, bool enable)
958{
959 union nic_mbx mbx = {};
960 struct queue_set *qs = nic->qs;
961 struct qs_cfg *qs_cfg;
962
963 if (!qs) {
964 netdev_warn(nic->netdev,
965 "Qset is still not allocated, don't init queues\n");
966 return;
967 }
968
969 qs->enable = enable;
970 qs->vnic_id = nic->vf_id;
971
972 /* Send a mailbox msg to PF to config Qset */
973 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
974 mbx.qs.num = qs->vnic_id;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300975 mbx.qs.sqs_count = nic->sqs_count;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700976
977 mbx.qs.cfg = 0;
978 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
979 if (qs->enable) {
980 qs_cfg->ena = 1;
981#ifdef __BIG_ENDIAN
982 qs_cfg->be = 1;
983#endif
984 qs_cfg->vnic = qs->vnic_id;
Sunil Goutham4a875502018-01-15 18:44:57 +0600985 /* Enable Tx timestamping capability */
986 if (nic->ptp_clock)
987 qs_cfg->send_tstmp_ena = 1;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700988 }
989 nicvf_send_msg_to_pf(nic, &mbx);
990}
991
992static void nicvf_free_resources(struct nicvf *nic)
993{
994 int qidx;
995 struct queue_set *qs = nic->qs;
996
997 /* Free receive buffer descriptor ring */
998 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
999 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1000
1001 /* Free completion queue */
1002 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1003 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1004
1005 /* Free send queue */
1006 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1007 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1008}
1009
1010static int nicvf_alloc_resources(struct nicvf *nic)
1011{
1012 int qidx;
1013 struct queue_set *qs = nic->qs;
1014
1015 /* Alloc receive buffer descriptor ring */
1016 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1017 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1018 DMA_BUFFER_LEN))
1019 goto alloc_fail;
1020 }
1021
1022 /* Alloc send queue */
1023 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301024 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
Sunil Goutham4863dea2015-05-26 19:20:15 -07001025 goto alloc_fail;
1026 }
1027
1028 /* Alloc completion queue */
1029 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1030 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
1031 goto alloc_fail;
1032 }
1033
1034 return 0;
1035alloc_fail:
1036 nicvf_free_resources(nic);
1037 return -ENOMEM;
1038}
1039
1040int nicvf_set_qset_resources(struct nicvf *nic)
1041{
1042 struct queue_set *qs;
1043
1044 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
1045 if (!qs)
1046 return -ENOMEM;
1047 nic->qs = qs;
1048
1049 /* Set count of each queue */
Sunil Goutham3a397eb2016-08-12 16:51:27 +05301050 qs->rbdr_cnt = DEFAULT_RBDR_CNT;
1051 qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
1052 qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
1053 qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001054
1055 /* Set queue lengths */
1056 qs->rbdr_len = RCV_BUF_COUNT;
1057 qs->sq_len = SND_QUEUE_LEN;
1058 qs->cq_len = CMP_QUEUE_LEN;
Sunil Goutham92dc8762015-08-30 12:29:15 +03001059
1060 nic->rx_queues = qs->rq_cnt;
1061 nic->tx_queues = qs->sq_cnt;
Sunil Goutham05c773f2017-05-02 18:36:54 +05301062 nic->xdp_tx_queues = 0;
Sunil Goutham92dc8762015-08-30 12:29:15 +03001063
Sunil Goutham4863dea2015-05-26 19:20:15 -07001064 return 0;
1065}
1066
1067int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
1068{
1069 bool disable = false;
1070 struct queue_set *qs = nic->qs;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +05301071 struct queue_set *pqs = nic->pnicvf->qs;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001072 int qidx;
1073
1074 if (!qs)
1075 return 0;
1076
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +05301077 /* Take primary VF's queue lengths.
1078 * This is needed to take queue lengths set from ethtool
1079 * into consideration.
1080 */
1081 if (nic->sqs_mode && pqs) {
1082 qs->cq_len = pqs->cq_len;
1083 qs->sq_len = pqs->sq_len;
1084 }
1085
Sunil Goutham4863dea2015-05-26 19:20:15 -07001086 if (enable) {
1087 if (nicvf_alloc_resources(nic))
1088 return -ENOMEM;
1089
1090 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1091 nicvf_snd_queue_config(nic, qs, qidx, enable);
1092 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1093 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1094 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1095 nicvf_rbdr_config(nic, qs, qidx, enable);
1096 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1097 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1098 } else {
1099 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1100 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1101 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1102 nicvf_rbdr_config(nic, qs, qidx, disable);
1103 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1104 nicvf_snd_queue_config(nic, qs, qidx, disable);
1105 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1106 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1107
1108 nicvf_free_resources(nic);
1109 }
1110
Jerin Jacob3458c402016-08-12 16:51:39 +05301111 /* Reset RXQ's stats.
1112 * SQ's stats will get reset automatically once SQ is reset.
1113 */
1114 nicvf_reset_rcv_queue_stats(nic);
1115
Sunil Goutham4863dea2015-05-26 19:20:15 -07001116 return 0;
1117}
1118
1119/* Get a free desc from SQ
1120 * returns descriptor pointer & descriptor number
1121 */
1122static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1123{
1124 int qentry;
1125
1126 qentry = sq->tail;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301127 if (!sq->is_xdp)
1128 atomic_sub(desc_cnt, &sq->free_cnt);
1129 else
1130 sq->xdp_free_cnt -= desc_cnt;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001131 sq->tail += desc_cnt;
1132 sq->tail &= (sq->dmem.q_len - 1);
1133
1134 return qentry;
1135}
1136
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301137/* Rollback to previous tail pointer when descriptors not used */
1138static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
1139 int qentry, int desc_cnt)
1140{
1141 sq->tail = qentry;
1142 atomic_add(desc_cnt, &sq->free_cnt);
1143}
1144
Sunil Goutham4863dea2015-05-26 19:20:15 -07001145/* Free descriptor back to SQ for future use */
1146void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1147{
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301148 if (!sq->is_xdp)
1149 atomic_add(desc_cnt, &sq->free_cnt);
1150 else
1151 sq->xdp_free_cnt += desc_cnt;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001152 sq->head += desc_cnt;
1153 sq->head &= (sq->dmem.q_len - 1);
1154}
1155
1156static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1157{
1158 qentry++;
1159 qentry &= (sq->dmem.q_len - 1);
1160 return qentry;
1161}
1162
1163void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1164{
1165 u64 sq_cfg;
1166
1167 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1168 sq_cfg |= NICVF_SQ_EN;
1169 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1170 /* Ring doorbell so that H/W restarts processing SQEs */
1171 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1172}
1173
1174void nicvf_sq_disable(struct nicvf *nic, int qidx)
1175{
1176 u64 sq_cfg;
1177
1178 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1179 sq_cfg &= ~NICVF_SQ_EN;
1180 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1181}
1182
1183void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
1184 int qidx)
1185{
1186 u64 head, tail;
1187 struct sk_buff *skb;
1188 struct nicvf *nic = netdev_priv(netdev);
1189 struct sq_hdr_subdesc *hdr;
1190
1191 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1192 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1193 while (sq->head != head) {
1194 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1195 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1196 nicvf_put_sq_desc(sq, 1);
1197 continue;
1198 }
1199 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham143ceb02015-07-29 16:49:37 +03001200 if (skb)
1201 dev_kfree_skb_any(skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001202 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
1203 atomic64_add(hdr->tot_len,
1204 (atomic64_t *)&netdev->stats.tx_bytes);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001205 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1206 }
1207}
1208
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301209/* XDP Transmit APIs */
1210void nicvf_xdp_sq_doorbell(struct nicvf *nic,
1211 struct snd_queue *sq, int sq_num)
1212{
1213 if (!sq->xdp_desc_cnt)
1214 return;
1215
1216 /* make sure all memory stores are done before ringing doorbell */
1217 wmb();
1218
1219 /* Inform HW to xmit the queued XDP descriptors */
1220 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1221 sq_num, sq->xdp_desc_cnt);
1222 sq->xdp_desc_cnt = 0;
1223}
1224
1225static inline void
1226nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1227 int subdesc_cnt, u64 data, int len)
1228{
1229 struct sq_hdr_subdesc *hdr;
1230
1231 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1232 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1233 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1234 hdr->subdesc_cnt = subdesc_cnt;
1235 hdr->tot_len = len;
1236 hdr->post_cqe = 1;
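	/* Remember the page backing this XDP buffer so it can be released
	 * with put_page when the SQE is cleaned up (see nicvf_free_snd_queue).
	 */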
1237 sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
1238}
1239
1240int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
1241 u64 bufaddr, u64 dma_addr, u16 len)
1242{
1243 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1244 int qentry;
1245
1246 if (subdesc_cnt > sq->xdp_free_cnt)
Sunil Gouthamaa136d02017-11-24 15:03:26 +03001247 return -1;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301248
1249 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1250
1251 nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
1252
1253 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1254 nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
1255
1256 sq->xdp_desc_cnt += subdesc_cnt;
1257
Sunil Gouthamaa136d02017-11-24 15:03:26 +03001258 return 0;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301259}
1260
Sunil Goutham4863dea2015-05-26 19:20:15 -07001261/* Calculate no of SQ subdescriptors needed to transmit all
1262 * segments of this TSO packet.
1263 * Taken from 'Tilera network driver' with a minor modification.
1264 */
1265static int nicvf_tso_count_subdescs(struct sk_buff *skb)
1266{
1267 struct skb_shared_info *sh = skb_shinfo(skb);
1268 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1269 unsigned int data_len = skb->len - sh_len;
1270 unsigned int p_len = sh->gso_size;
1271 long f_id = -1; /* id of the current fragment */
1272 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1273 long f_used = 0; /* bytes used from the current fragment */
1274 long n; /* size of the current piece of payload */
1275 int num_edescs = 0;
1276 int segment;
1277
1278 for (segment = 0; segment < sh->gso_segs; segment++) {
1279 unsigned int p_used = 0;
1280
1281 /* One edesc for header and for each piece of the payload. */
1282 for (num_edescs++; p_used < p_len; num_edescs++) {
1283 /* Advance as needed. */
1284 while (f_used >= f_size) {
1285 f_id++;
1286 f_size = skb_frag_size(&sh->frags[f_id]);
1287 f_used = 0;
1288 }
1289
1290 /* Use bytes from the current fragment. */
1291 n = p_len - p_used;
1292 if (n > f_size - f_used)
1293 n = f_size - f_used;
1294 f_used += n;
1295 p_used += n;
1296 }
1297
1298 /* The last segment may be less than gso_size. */
1299 data_len -= p_len;
1300 if (data_len < p_len)
1301 p_len = data_len;
1302 }
1303
1304 /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
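	/* Example: gso_segs = 3 with one payload piece per segment gives
	 * num_edescs = 6 (one header and one payload edesc per segment),
	 * so 6 + 3 = 9 subdescriptors in total.
	 */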
1305 return num_edescs + sh->gso_segs;
1306}
1307
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301308#define POST_CQE_DESC_COUNT 2
1309
Sunil Goutham4863dea2015-05-26 19:20:15 -07001310/* Get the number of SQ descriptors needed to xmit this skb */
1311static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
1312{
1313 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1314
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301315 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001316 subdesc_cnt = nicvf_tso_count_subdescs(skb);
1317 return subdesc_cnt;
1318 }
1319
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301320 /* Dummy descriptors to get TSO pkt completion notification */
1321 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
1322 subdesc_cnt += POST_CQE_DESC_COUNT;
1323
Sunil Goutham4863dea2015-05-26 19:20:15 -07001324 if (skb_shinfo(skb)->nr_frags)
1325 subdesc_cnt += skb_shinfo(skb)->nr_frags;
1326
1327 return subdesc_cnt;
1328}
1329
1330/* Add SQ HEADER subdescriptor.
1331 * First subdescriptor for every send descriptor.
1332 */
1333static inline void
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301334nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001335 int subdesc_cnt, struct sk_buff *skb, int len)
1336{
1337 int proto;
1338 struct sq_hdr_subdesc *hdr;
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301339 union {
1340 struct iphdr *v4;
1341 struct ipv6hdr *v6;
1342 unsigned char *hdr;
1343 } ip;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001344
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301345 ip.hdr = skb_network_header(skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001346 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001347 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1348 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301349
1350 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1351 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
1352 * segment transmitted on 88xx.
1353 */
1354 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
1355 } else {
1356 sq->skbuff[qentry] = (u64)skb;
1357 /* Enable notification via CQE after processing SQE */
1358 hdr->post_cqe = 1;
1359 /* No of subdescriptors following this */
1360 hdr->subdesc_cnt = subdesc_cnt;
1361 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001362 hdr->tot_len = len;
1363
1364 /* Offload checksum calculation to HW */
1365 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Florian Westphal134059f2017-12-06 01:04:50 +01001366 if (ip.v4->version == 4)
1367 hdr->csum_l3 = 1; /* Enable IP csum calculation */
Sunil Goutham4863dea2015-05-26 19:20:15 -07001368 hdr->l3_offset = skb_network_offset(skb);
1369 hdr->l4_offset = skb_transport_offset(skb);
1370
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301371 proto = (ip.v4->version == 4) ? ip.v4->protocol :
1372 ip.v6->nexthdr;
1373
Sunil Goutham4863dea2015-05-26 19:20:15 -07001374 switch (proto) {
1375 case IPPROTO_TCP:
1376 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1377 break;
1378 case IPPROTO_UDP:
1379 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1380 break;
1381 case IPPROTO_SCTP:
1382 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1383 break;
1384 }
1385 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301386
1387 if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1388 hdr->tso = 1;
1389 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
1390 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1391 /* For non-tunneled pkts, point this to L2 ethertype */
1392 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
Sunil Goutham964cb692016-11-15 17:38:16 +05301393 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301394 }
Sunil Goutham4a875502018-01-15 18:44:57 +06001395
1396 /* Check if timestamp is requested */
1397 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1398 skb_tx_timestamp(skb);
1399 return;
1400 }
1401
1402 /* Tx timestamping not supported along with TSO, so ignore request */
1403 if (skb_shinfo(skb)->gso_size)
1404 return;
1405
1406 /* HW supports only a single outstanding packet to timestamp */
1407 if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
1408 return;
1409
1410 /* Mark the SKB for later reference */
1411 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1412
1413 /* Finally enable timestamp generation
1414 * Since 'post_cqe' is also set, two CQEs will be posted
1415 * for this packet i.e CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
1416 */
1417 hdr->tstmp = 1;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001418}
1419
1420/* SQ GATHER subdescriptor
1421 * Must follow HDR descriptor
1422 */
1423static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1424 int size, u64 data)
1425{
1426 struct sq_gather_subdesc *gather;
1427
1428 qentry &= (sq->dmem.q_len - 1);
1429 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1430
1431 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1432 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
Sunil Goutham4b561c12015-07-29 16:49:36 +03001433 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001434 gather->size = size;
1435 gather->addr = data;
1436}
1437
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301438/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
1439 * packet so that a CQE is posted as a notification for transmission of
1440 * the TSO packet.
1441 */
1442static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1443 int tso_sqe, struct sk_buff *skb)
1444{
1445 struct sq_imm_subdesc *imm;
1446 struct sq_hdr_subdesc *hdr;
1447
1448 sq->skbuff[qentry] = (u64)skb;
1449
1450 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1451 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1452 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1453 /* Enable notification via CQE after processing SQE */
1454 hdr->post_cqe = 1;
1455 /* There is no packet to transmit here */
1456 hdr->dont_send = 1;
1457 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1458 hdr->tot_len = 1;
1459 /* Actual TSO header SQE index, needed for cleanup */
1460 hdr->rsvd2 = tso_sqe;
1461
1462 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1463 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1464 memset(imm, 0, SND_QUEUE_DESC_SIZE);
1465 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1466 imm->len = 1;
1467}
1468
Sunil Goutham2c204c22016-09-23 14:42:28 +05301469static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1470 int sq_num, int desc_cnt)
1471{
1472 struct netdev_queue *txq;
1473
1474 txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1475 skb_get_queue_mapping(skb));
1476
1477 netdev_tx_sent_queue(txq, skb->len);
1478
1479 /* make sure all memory stores are done before ringing doorbell */
1480 smp_wmb();
1481
1482 /* Inform HW to xmit all TSO segments */
1483 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1484 sq_num, desc_cnt);
1485}
1486
Sunil Goutham4863dea2015-05-26 19:20:15 -07001487/* Segment a TSO packet into 'gso_size' segments and append
1488 * them to SQ for transfer
1489 */
1490static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
Sunil Goutham92dc8762015-08-30 12:29:15 +03001491 int sq_num, int qentry, struct sk_buff *skb)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001492{
1493 struct tso_t tso;
1494 int seg_subdescs = 0, desc_cnt = 0;
1495 int seg_len, total_len, data_left;
1496 int hdr_qentry = qentry;
1497 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1498
1499 tso_start(skb, &tso);
1500 total_len = skb->len - hdr_len;
1501 while (total_len > 0) {
1502 char *hdr;
1503
1504 /* Save Qentry for adding HDR_SUBDESC at the end */
1505 hdr_qentry = qentry;
1506
1507 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1508 total_len -= data_left;
1509
1510 /* Add segment's header */
1511 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1512 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1513 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1514 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1515 sq->tso_hdrs_phys +
1516 qentry * TSO_HEADER_SIZE);
1517 /* HDR_SUBDESC + GATHER */
1518 seg_subdescs = 2;
1519 seg_len = hdr_len;
1520
1521 /* Add segment's payload fragments */
1522 while (data_left > 0) {
1523 int size;
1524
1525 size = min_t(int, tso.size, data_left);
1526
1527 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1528 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1529 virt_to_phys(tso.data));
1530 seg_subdescs++;
1531 seg_len += size;
1532
1533 data_left -= size;
1534 tso_build_data(skb, &tso, size);
1535 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301536 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001537 seg_subdescs - 1, skb, seg_len);
Sunil Goutham143ceb02015-07-29 16:49:37 +03001538 sq->skbuff[hdr_qentry] = (u64)NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001539 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1540
1541 desc_cnt += seg_subdescs;
1542 }
1543 /* Save SKB in the last segment for freeing */
1544 sq->skbuff[hdr_qentry] = (u64)skb;
1545
Sunil Goutham2c204c22016-09-23 14:42:28 +05301546 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001547
Sunil Goutham964cb692016-11-15 17:38:16 +05301548 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001549 return 1;
1550}
1551
1552/* Append an skb to a SQ for packet transfer. */
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301553int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1554 struct sk_buff *skb, u8 sq_num)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001555{
1556 int i, size;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301557 int subdesc_cnt, hdr_sqe = 0;
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301558 int qentry;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301559 u64 dma_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001560
1561 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1562 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1563 goto append_fail;
1564
1565 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1566
1567 /* Check if it's a TSO packet; segment in software if HW TSO is unsupported */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301568 if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
Sunil Goutham92dc8762015-08-30 12:29:15 +03001569 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001570
1571 /* Add SQ header subdesc */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301572 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1573 skb, skb->len);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301574 hdr_sqe = qentry;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001575
1576 /* Add SQ gather subdescs */
1577 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1578 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301579 /* HW will ensure data coherency, CPU sync not required */
1580 dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1581 offset_in_page(skb->data), size,
1582 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1583 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1584 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1585 return 0;
1586 }
1587
1588 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001589
1590 /* Check for scattered buffer */
1591 if (!skb_is_nonlinear(skb))
1592 goto doorbell;
1593
1594 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1595 const struct skb_frag_struct *frag;
1596
1597 frag = &skb_shinfo(skb)->frags[i];
1598
1599 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1600 size = skb_frag_size(frag);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301601 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1602 skb_frag_page(frag),
1603 frag->page_offset, size,
1604 DMA_TO_DEVICE,
1605 DMA_ATTR_SKIP_CPU_SYNC);
1606 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1607 /* Free entire chain of mapped buffers
1608 * here 'i' = frags mapped + above mapped skb->data
1609 */
1610 nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1611 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1612 return 0;
1613 }
1614 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001615 }
1616
1617doorbell:
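	/* On T88 silicon with hardware TSO, append a dummy HDR + IMMEDIATE
	 * pair so a CQE is still posted for this TSO packet (see
	 * nicvf_sq_add_cqe_subdesc above).
	 */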
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301618 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1619 qentry = nicvf_get_nxt_sqentry(sq, qentry);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301620 nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301621 }
1622
Sunil Goutham2c204c22016-09-23 14:42:28 +05301623 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001624
Sunil Goutham4863dea2015-05-26 19:20:15 -07001625 return 1;
1626
1627append_fail:
Sunil Goutham92dc8762015-08-30 12:29:15 +03001628 /* Use original PCI dev for debug log */
1629 nic = nic->pnicvf;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001630 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1631 return 0;
1632}
1633
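/* rb_lens[] entries are 16-bit lengths packed into 64-bit words; on
 * big-endian hosts the halfwords within each word appear in reverse
 * order, so remap the index (e.g. 0->3, 1->2, 2->1, 3->0).
 */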
1634static inline unsigned frag_num(unsigned i)
1635{
1636#ifdef __BIG_ENDIAN
1637 return (i & ~3) + 3 - (i & 3);
1638#else
1639 return i;
1640#endif
1641}
1642
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301643static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
1644 u64 buf_addr, bool xdp)
1645{
1646 struct page *page = NULL;
1647 int len = RCV_FRAG_LEN;
1648
1649 if (xdp) {
1650 page = virt_to_page(phys_to_virt(buf_addr));
1651 /* Check if it's a recycled page, if not
1652 * unmap the DMA mapping.
1653 *
1654 * Recycled page holds an extra reference.
1655 */
1656 if (page_ref_count(page) != 1)
1657 return;
Sunil Gouthame3d06ff2017-05-02 18:36:57 +05301658
Sunil Gouthamaa136d02017-11-24 15:03:26 +03001659 len += XDP_HEADROOM;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301660 /* Receive buffers in XDP mode are mapped from page start */
1661 dma_addr &= PAGE_MASK;
1662 }
1663 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
1664 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1665}
1666
Sunil Goutham4863dea2015-05-26 19:20:15 -07001667/* Returns SKB for a received packet */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301668struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
1669 struct cqe_rx_t *cqe_rx, bool xdp)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001670{
1671 int frag;
1672 int payload_len = 0;
1673 struct sk_buff *skb = NULL;
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301674 struct page *page;
1675 int offset;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001676 u16 *rb_lens = NULL;
1677 u64 *rb_ptrs = NULL;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301678 u64 phys_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001679
1680 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
Sunil Goutham02a72bd2016-08-12 16:51:28 +05301681 /* On all chips except 88xx pass1, CQE_RX2_S is added to
1682 * CQE_RX at word6, hence the buffer pointers move by one word.
1683 *
1684 * Use the existing 'hw_tso' flag, which is set for all chips
1685 * except 88xx pass1, instead of an additional cache line
1686 * access (or miss) to read the PCI dev's revision.
1687 */
1688 if (!nic->hw_tso)
1689 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1690 else
1691 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
Sunil Goutham4863dea2015-05-26 19:20:15 -07001692
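	/* Walk the receive buffers listed in the CQE: the first becomes the
	 * skb head, the rest are attached as page fragments.
	 */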
Sunil Goutham4863dea2015-05-26 19:20:15 -07001693 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1694 payload_len = rb_lens[frag_num(frag)];
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301695 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1696 if (!phys_addr) {
1697 if (skb)
1698 dev_kfree_skb_any(skb);
1699 return NULL;
1700 }
1701
Sunil Goutham4863dea2015-05-26 19:20:15 -07001702 if (!frag) {
1703 /* First fragment */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301704 nicvf_unmap_rcv_buffer(nic,
1705 *rb_ptrs - cqe_rx->align_pad,
1706 phys_addr, xdp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001707 skb = nicvf_rb_ptr_to_skb(nic,
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301708 phys_addr - cqe_rx->align_pad,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001709 payload_len);
1710 if (!skb)
1711 return NULL;
1712 skb_reserve(skb, cqe_rx->align_pad);
1713 skb_put(skb, payload_len);
1714 } else {
1715 /* Add fragments */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301716 nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301717 page = virt_to_page(phys_to_virt(phys_addr));
1718 offset = phys_to_virt(phys_addr) - page_address(page);
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301719 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1720 offset, payload_len, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001721 }
1722 /* Next buffer pointer */
1723 rb_ptrs++;
1724 }
1725 return skb;
1726}
1727
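/* Convert an interrupt type and queue index into its bit mask in the
 * VF interrupt enable/status registers.
 */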
Yury Norovb45ceb42015-12-07 10:30:32 +05301728static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001729{
1730 u64 reg_val;
1731
Sunil Goutham4863dea2015-05-26 19:20:15 -07001732 switch (int_type) {
1733 case NICVF_INTR_CQ:
1734 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1735 break;
1736 case NICVF_INTR_SQ:
1737 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1738 break;
1739 case NICVF_INTR_RBDR:
1740 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1741 break;
1742 case NICVF_INTR_PKT_DROP:
1743 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1744 break;
1745 case NICVF_INTR_TCP_TIMER:
1746 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1747 break;
1748 case NICVF_INTR_MBOX:
1749 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1750 break;
1751 case NICVF_INTR_QS_ERR:
Yury Norovb45ceb42015-12-07 10:30:32 +05301752 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001753 break;
1754 default:
Yury Norovb45ceb42015-12-07 10:30:32 +05301755 reg_val = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001756 }
1757
Yury Norovb45ceb42015-12-07 10:30:32 +05301758 return reg_val;
1759}
1760
1761/* Enable interrupt */
1762void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1763{
1764 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1765
1766 if (!mask) {
1767 netdev_dbg(nic->netdev,
1768 "Failed to enable interrupt: unknown type\n");
1769 return;
1770 }
1771 nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1772 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1773}
1774
1775/* Disable interrupt */
1776void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1777{
1778 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1779
1780 if (!mask) {
1781 netdev_dbg(nic->netdev,
1782 "Failed to disable interrupt: unknown type\n");
1783 return;
1784 }
1785
1786 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1787}
1788
1789/* Clear interrupt */
1790void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1791{
1792 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1793
1794 if (!mask) {
1795 netdev_dbg(nic->netdev,
1796 "Failed to clear interrupt: unknown type\n");
1797 return;
1798 }
1799
1800 nicvf_reg_write(nic, NIC_VF_INT, mask);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001801}
1802
1803/* Check if interrupt is enabled */
1804int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1805{
Yury Norovb45ceb42015-12-07 10:30:32 +05301806 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1807 /* If the interrupt type is unknown, treat it as disabled. */
1808 if (!mask) {
1809 netdev_dbg(nic->netdev,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001810 "Failed to check interrupt enable: unknown type\n");
Yury Norovb45ceb42015-12-07 10:30:32 +05301811 return 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001812 }
1813
Yury Norovb45ceb42015-12-07 10:30:32 +05301814 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001815}
1816
1817void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1818{
1819 struct rcv_queue *rq;
1820
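/* Stats register address: block base | (queue index << Q_NUM shift) | (stat offset << 3) */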
1821#define GET_RQ_STATS(reg) \
1822 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1823 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1824
1825 rq = &nic->qs->rq[rq_idx];
1826 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1827 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1828}
1829
1830void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1831{
1832 struct snd_queue *sq;
1833
1834#define GET_SQ_STATS(reg) \
1835 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1836 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1837
1838 sq = &nic->qs->sq[sq_idx];
1839 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1840 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1841}
1842
1843/* Check for errors in the receive completion queue entry */
Sunil Gouthamad2eceb2016-02-16 16:29:51 +05301844int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001845{
Joe Perchesbf24e132017-06-27 03:56:54 -07001846 netif_err(nic, rx_err, nic->netdev,
1847 "RX error CQE err_level 0x%x err_opcode 0x%x\n",
1848 cqe_rx->err_level, cqe_rx->err_opcode);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001849
Sunil Goutham4863dea2015-05-26 19:20:15 -07001850 switch (cqe_rx->err_opcode) {
1851 case CQ_RX_ERROP_RE_PARTIAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301852 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001853 break;
1854 case CQ_RX_ERROP_RE_JABBER:
Sunil Goutham964cb692016-11-15 17:38:16 +05301855 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001856 break;
1857 case CQ_RX_ERROP_RE_FCS:
Sunil Goutham964cb692016-11-15 17:38:16 +05301858 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001859 break;
1860 case CQ_RX_ERROP_RE_RX_CTL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301861 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001862 break;
1863 case CQ_RX_ERROP_PREL2_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301864 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001865 break;
1866 case CQ_RX_ERROP_L2_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301867 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001868 break;
1869 case CQ_RX_ERROP_L2_OVERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301870 this_cpu_inc(nic->drv_stats->rx_oversize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001871 break;
1872 case CQ_RX_ERROP_L2_UNDERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301873 this_cpu_inc(nic->drv_stats->rx_undersize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001874 break;
1875 case CQ_RX_ERROP_L2_LENMISM:
Sunil Goutham964cb692016-11-15 17:38:16 +05301876 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001877 break;
1878 case CQ_RX_ERROP_L2_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301879 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001880 break;
1881 case CQ_RX_ERROP_IP_NOT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301882 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001883 break;
1884 case CQ_RX_ERROP_IP_CSUM_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301885 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001886 break;
1887 case CQ_RX_ERROP_IP_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301888 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001889 break;
1890 case CQ_RX_ERROP_IP_MALD:
Sunil Goutham964cb692016-11-15 17:38:16 +05301891 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001892 break;
1893 case CQ_RX_ERROP_IP_HOP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301894 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001895 break;
1896 case CQ_RX_ERROP_L3_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301897 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001898 break;
1899 case CQ_RX_ERROP_L4_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301900 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001901 break;
1902 case CQ_RX_ERROP_L4_CHK:
Sunil Goutham964cb692016-11-15 17:38:16 +05301903 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001904 break;
1905 case CQ_RX_ERROP_UDP_LEN:
Sunil Goutham964cb692016-11-15 17:38:16 +05301906 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001907 break;
1908 case CQ_RX_ERROP_L4_PORT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301909 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001910 break;
1911 case CQ_RX_ERROP_TCP_FLAG:
Sunil Goutham964cb692016-11-15 17:38:16 +05301912 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001913 break;
1914 case CQ_RX_ERROP_TCP_OFFSET:
Sunil Goutham964cb692016-11-15 17:38:16 +05301915 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001916 break;
1917 case CQ_RX_ERROP_L4_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301918 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001919 break;
1920 case CQ_RX_ERROP_RBDR_TRUNC:
Sunil Goutham964cb692016-11-15 17:38:16 +05301921 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001922 break;
1923 }
1924
1925 return 1;
1926}
1927
1928/* Check for errors in the send completion queue entry */
Sunil Goutham964cb692016-11-15 17:38:16 +05301929int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001930{
Sunil Goutham4863dea2015-05-26 19:20:15 -07001931 switch (cqe_tx->send_status) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001932 case CQ_TX_ERROP_DESC_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301933 this_cpu_inc(nic->drv_stats->tx_desc_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001934 break;
1935 case CQ_TX_ERROP_HDR_CONS_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301936 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001937 break;
1938 case CQ_TX_ERROP_SUBDC_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301939 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001940 break;
Sunil Goutham712c3182016-11-15 17:37:36 +05301941 case CQ_TX_ERROP_MAX_SIZE_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301942 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
Sunil Goutham712c3182016-11-15 17:37:36 +05301943 break;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001944 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301945 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001946 break;
1947 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301948 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001949 break;
1950 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301951 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001952 break;
1953 case CQ_TX_ERROP_LOCK_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301954 this_cpu_inc(nic->drv_stats->tx_lock_viol);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001955 break;
1956 case CQ_TX_ERROP_DATA_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301957 this_cpu_inc(nic->drv_stats->tx_data_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001958 break;
1959 case CQ_TX_ERROP_TSTMP_CONFLICT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301960 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001961 break;
1962 case CQ_TX_ERROP_TSTMP_TIMEOUT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301963 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001964 break;
1965 case CQ_TX_ERROP_MEM_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301966 this_cpu_inc(nic->drv_stats->tx_mem_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001967 break;
1968 case CQ_TX_ERROP_CK_OVERLAP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301969 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001970 break;
1971 case CQ_TX_ERROP_CK_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301972 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001973 break;
1974 }
1975
1976 return 1;
1977}