Sunil Goutham4863dea2015-05-26 19:20:15 -07001/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9#include <linux/pci.h>
10#include <linux/netdevice.h>
11#include <linux/ip.h>
12#include <linux/etherdevice.h>
Sunil Goutham83abb7d2017-03-07 18:09:08 +053013#include <linux/iommu.h>
Sunil Goutham4863dea2015-05-26 19:20:15 -070014#include <net/ip.h>
15#include <net/tso.h>
16
17#include "nic_reg.h"
18#include "nic.h"
19#include "q_struct.h"
20#include "nicvf_queues.h"
21
Sunil Goutham16f2bcc2017-05-02 18:36:56 +053022static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
23 int size, u64 data);
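/* Apply page references deferred while carving receive buffers out of
 * the current page, then reset the deferred count.
 */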
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053024static void nicvf_get_page(struct nicvf *nic)
25{
26 if (!nic->rb_pageref || !nic->rb_page)
27 return;
28
Joonsoo Kim6d061f92016-05-19 17:10:46 -070029 page_ref_add(nic->rb_page, nic->rb_pageref);
Sunil Goutham5c2e26f2016-03-14 16:36:14 +053030 nic->rb_pageref = 0;
31}
32
Sunil Goutham4863dea2015-05-26 19:20:15 -070033/* Poll a register for a specific value */
34static int nicvf_poll_reg(struct nicvf *nic, int qidx,
35 u64 reg, int bit_pos, int bits, int val)
36{
37 u64 bit_mask;
38 u64 reg_val;
39 int timeout = 10;
40
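	/* Build a mask of 'bits' width starting at 'bit_pos' */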
41 bit_mask = (1ULL << bits) - 1;
42 bit_mask = (bit_mask << bit_pos);
43
44 while (timeout) {
45 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
46 if (((reg_val & bit_mask) >> bit_pos) == val)
47 return 0;
48 usleep_range(1000, 2000);
49 timeout--;
50 }
51 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
52 return 1;
53}
54
55/* Allocate memory for a queue's descriptors */
56static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
57 int q_len, int desc_size, int align_bytes)
58{
59 dmem->q_len = q_len;
60 dmem->size = (desc_size * q_len) + align_bytes;
61 /* Save address, need it while freeing */
62 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
63 &dmem->dma, GFP_KERNEL);
64 if (!dmem->unalign_base)
65 return -ENOMEM;
66
67 /* Align memory address for 'align_bytes' */
68 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
Aleksey Makarov39a0dd02015-06-02 11:00:25 -070069 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
Sunil Goutham4863dea2015-05-26 19:20:15 -070070 return 0;
71}
72
73/* Free queue's descriptor memory */
74static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
75{
76 if (!dmem)
77 return;
78
79 dma_free_coherent(&nic->pdev->dev, dmem->size,
80 dmem->unalign_base, dmem->dma);
81 dmem->unalign_base = NULL;
82 dmem->base = NULL;
83}
84
Sunil Goutham5836b442017-05-02 18:36:50 +053085/* Allocate a new page or recycle one if possible
86 *
87 * We cannot optimize dma mapping here, since
 88 * 1. There is only one RBDR ring for 8 Rx queues.
89 * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
90 * and not idx into RBDR ring, so can't refer to saved info.
91 * 3. There are multiple receive buffers per page
Sunil Goutham4863dea2015-05-26 19:20:15 -070092 */
Sunil Goutham5836b442017-05-02 18:36:50 +053093static struct pgcache *nicvf_alloc_page(struct nicvf *nic,
94 struct rbdr *rbdr, gfp_t gfp)
Sunil Goutham4863dea2015-05-26 19:20:15 -070095{
Sunil Goutham5836b442017-05-02 18:36:50 +053096 struct page *page = NULL;
97 struct pgcache *pgcache, *next;
98
99 /* Check if page is already allocated */
100 pgcache = &rbdr->pgcache[rbdr->pgidx];
101 page = pgcache->page;
102 /* Check if page can be recycled */
103 if (page && (page_ref_count(page) != 1))
104 page = NULL;
105
106 if (!page) {
107 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
108 if (!page)
109 return NULL;
110
111 this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
112
113 /* Check for space */
114 if (rbdr->pgalloc >= rbdr->pgcnt) {
115 /* Page can still be used */
116 nic->rb_page = page;
117 return NULL;
118 }
119
120 /* Save the page in page cache */
121 pgcache->page = page;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530122 pgcache->dma_addr = 0;
Sunil Goutham5836b442017-05-02 18:36:50 +0530123 rbdr->pgalloc++;
124 }
125
126 /* Take extra page reference for recycling */
127 page_ref_add(page, 1);
128
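	/* Advance to the next page cache slot; pgcnt is a power of
	 * two, so masking wraps the index around.
	 */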
129 rbdr->pgidx++;
130 rbdr->pgidx &= (rbdr->pgcnt - 1);
131
132 /* Prefetch refcount of next page in page cache */
133 next = &rbdr->pgcache[rbdr->pgidx];
134 page = next->page;
135 if (page)
136 prefetch(&page->_refcount);
137
138 return pgcache;
139}
140
141/* Allocate buffer for packet reception */
142static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
Sunil Goutham927987f2017-05-02 18:36:53 +0530143 gfp_t gfp, u32 buf_len, u64 *rbuf)
Sunil Goutham5836b442017-05-02 18:36:50 +0530144{
145 struct pgcache *pgcache = NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700146
Sunil Goutham05c773f2017-05-02 18:36:54 +0530147 /* Check if request can be accommodated in the previously allocated page.
148 * But in XDP mode only one buffer per page is permitted.
149 */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530150 if (!rbdr->is_xdp && nic->rb_page &&
Sunil Goutham5836b442017-05-02 18:36:50 +0530151 ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530152 nic->rb_pageref++;
153 goto ret;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700154 }
155
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530156 nicvf_get_page(nic);
Sunil Goutham5836b442017-05-02 18:36:50 +0530157 nic->rb_page = NULL;
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530158
Sunil Goutham5836b442017-05-02 18:36:50 +0530159 /* Get new page, either recycled or new one */
160 pgcache = nicvf_alloc_page(nic, rbdr, gfp);
161 if (!pgcache && !nic->rb_page) {
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530162 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
163 return -ENOMEM;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700164 }
Sunil Goutham5836b442017-05-02 18:36:50 +0530165
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530166 nic->rb_page_offset = 0;
Sunil Gouthame3d06ff2017-05-02 18:36:57 +0530167
168 /* Reserve space for header modifications by BPF program */
169 if (rbdr->is_xdp)
170 buf_len += XDP_PACKET_HEADROOM;
171
Sunil Goutham5836b442017-05-02 18:36:50 +0530172 /* Check if it's recycled */
173 if (pgcache)
174 nic->rb_page = pgcache->page;
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530175ret:
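	/* For recycled XDP buffers reuse the saved DMA address,
	 * otherwise map the buffer afresh.
	 */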
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530176 if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
177 *rbuf = pgcache->dma_addr;
178 } else {
179 /* HW will ensure data coherency, CPU sync not required */
180 *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
181 nic->rb_page_offset, buf_len,
182 DMA_FROM_DEVICE,
183 DMA_ATTR_SKIP_CPU_SYNC);
184 if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
185 if (!nic->rb_page_offset)
186 __free_pages(nic->rb_page, 0);
187 nic->rb_page = NULL;
188 return -ENOMEM;
189 }
190 if (pgcache)
Sunil Gouthame3d06ff2017-05-02 18:36:57 +0530191 pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530192 nic->rb_page_offset += buf_len;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530193 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700194
Sunil Goutham4863dea2015-05-26 19:20:15 -0700195 return 0;
196}
197
Sunil Goutham668dda02015-12-07 10:30:33 +0530198/* Build skb around receive buffer */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700199static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
200 u64 rb_ptr, int len)
201{
Sunil Goutham668dda02015-12-07 10:30:33 +0530202 void *data;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700203 struct sk_buff *skb;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700204
Sunil Goutham668dda02015-12-07 10:30:33 +0530205 data = phys_to_virt(rb_ptr);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700206
207 /* Now build an skb to give to stack */
Sunil Goutham668dda02015-12-07 10:30:33 +0530208 skb = build_skb(data, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700209 if (!skb) {
Sunil Goutham668dda02015-12-07 10:30:33 +0530210 put_page(virt_to_page(data));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700211 return NULL;
212 }
213
Sunil Goutham668dda02015-12-07 10:30:33 +0530214 prefetch(skb->data);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700215 return skb;
216}
217
218/* Allocate RBDR ring and populate receive buffers */
219static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
220 int ring_len, int buf_size)
221{
222 int idx;
Sunil Goutham927987f2017-05-02 18:36:53 +0530223 u64 rbuf;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700224 struct rbdr_entry_t *desc;
225 int err;
226
227 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
228 sizeof(struct rbdr_entry_t),
229 NICVF_RCV_BUF_ALIGN_BYTES);
230 if (err)
231 return err;
232
233 rbdr->desc = rbdr->dmem.base;
234 /* Buffer size has to be in multiples of 128 bytes */
235 rbdr->dma_size = buf_size;
236 rbdr->enable = true;
237 rbdr->thresh = RBDR_THRESH;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530238 rbdr->head = 0;
239 rbdr->tail = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700240
Sunil Goutham5836b442017-05-02 18:36:50 +0530241 /* Initialize page recycling stuff.
242 *
 243 * Can't use a single buffer per page, especially with 64K pages.
 244 * On embedded platforms, i.e. 81xx/83xx, available memory itself
 245 * is low and the minimum RBDR ring size is 8K, which takes away
 246 * lots of memory.
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530247 *
248 * But for XDP it has to be a single buffer per page.
Sunil Goutham5836b442017-05-02 18:36:50 +0530249 */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +0530250 if (!nic->pnicvf->xdp_prog) {
251 rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
252 rbdr->is_xdp = false;
253 } else {
254 rbdr->pgcnt = ring_len;
255 rbdr->is_xdp = true;
256 }
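	/* Round up to a power of two so the page cache index can be
	 * wrapped with a simple mask.
	 */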
Sunil Goutham5836b442017-05-02 18:36:50 +0530257 rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
258 rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
259 rbdr->pgcnt, GFP_KERNEL);
260 if (!rbdr->pgcache)
261 return -ENOMEM;
262 rbdr->pgidx = 0;
263 rbdr->pgalloc = 0;
264
Sunil Goutham4863dea2015-05-26 19:20:15 -0700265 nic->rb_page = NULL;
266 for (idx = 0; idx < ring_len; idx++) {
Sunil Goutham5836b442017-05-02 18:36:50 +0530267 err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
268 RCV_FRAG_LEN, &rbuf);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530269 if (err) {
270 /* To free already allocated and mapped ones */
271 rbdr->tail = idx - 1;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700272 return err;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530273 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700274
275 desc = GET_RBDR_DESC(rbdr, idx);
Sunil Goutham927987f2017-05-02 18:36:53 +0530276 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700277 }
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530278
279 nicvf_get_page(nic);
280
Sunil Goutham4863dea2015-05-26 19:20:15 -0700281 return 0;
282}
283
284/* Free RBDR ring and its receive buffers */
285static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
286{
287 int head, tail;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530288 u64 buf_addr, phys_addr;
Sunil Goutham5836b442017-05-02 18:36:50 +0530289 struct pgcache *pgcache;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700290 struct rbdr_entry_t *desc;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700291
292 if (!rbdr)
293 return;
294
295 rbdr->enable = false;
296 if (!rbdr->dmem.base)
297 return;
298
299 head = rbdr->head;
300 tail = rbdr->tail;
301
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530302 /* Release page references */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700303 while (head != tail) {
304 desc = GET_RBDR_DESC(rbdr, head);
Sunil Goutham5e848e42017-05-02 18:36:51 +0530305 buf_addr = desc->buf_addr;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530306 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
307 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
308 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
309 if (phys_addr)
310 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700311 head++;
312 head &= (rbdr->dmem.q_len - 1);
313 }
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530314 /* Release buffer of tail desc */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700315 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham5e848e42017-05-02 18:36:51 +0530316 buf_addr = desc->buf_addr;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530317 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
318 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
319 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
320 if (phys_addr)
321 put_page(virt_to_page(phys_to_virt(phys_addr)));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700322
Sunil Goutham5836b442017-05-02 18:36:50 +0530323 /* Sync page cache info */
324 smp_rmb();
325
326 /* Release additional page references held for recycling */
327 head = 0;
328 while (head < rbdr->pgcnt) {
329 pgcache = &rbdr->pgcache[head];
330 if (pgcache->page && page_ref_count(pgcache->page) != 0)
331 put_page(pgcache->page);
332 head++;
333 }
334
Sunil Goutham4863dea2015-05-26 19:20:15 -0700335 /* Free RBDR ring */
336 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
337}
338
339/* Refill receive buffer descriptors with new buffers.
340 */
Aleksey Makarovfd7ec062015-06-02 11:00:23 -0700341static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700342{
343 struct queue_set *qs = nic->qs;
344 int rbdr_idx = qs->rbdr_cnt;
345 int tail, qcount;
346 int refill_rb_cnt;
347 struct rbdr *rbdr;
348 struct rbdr_entry_t *desc;
Sunil Goutham927987f2017-05-02 18:36:53 +0530349 u64 rbuf;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700350 int new_rb = 0;
351
352refill:
353 if (!rbdr_idx)
354 return;
355 rbdr_idx--;
356 rbdr = &qs->rbdr[rbdr_idx];
357 /* Check if it's enabled */
358 if (!rbdr->enable)
359 goto next_rbdr;
360
361 /* Get no of desc's to be refilled */
362 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
363 qcount &= 0x7FFFF;
 364 /* Doorbell can be rung with a max of ring size minus 1 */
365 if (qcount >= (qs->rbdr_len - 1))
366 goto next_rbdr;
367 else
368 refill_rb_cnt = qs->rbdr_len - qcount - 1;
369
Sunil Goutham5836b442017-05-02 18:36:50 +0530370 /* Sync page cache info */
371 smp_rmb();
372
Sunil Goutham4863dea2015-05-26 19:20:15 -0700373 /* Start filling descs from tail */
374 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
375 while (refill_rb_cnt) {
376 tail++;
377 tail &= (rbdr->dmem.q_len - 1);
378
Sunil Goutham5836b442017-05-02 18:36:50 +0530379 if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700380 break;
381
382 desc = GET_RBDR_DESC(rbdr, tail);
Sunil Goutham927987f2017-05-02 18:36:53 +0530383 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700384 refill_rb_cnt--;
385 new_rb++;
386 }
387
Sunil Goutham5c2e26f2016-03-14 16:36:14 +0530388 nicvf_get_page(nic);
389
Sunil Goutham4863dea2015-05-26 19:20:15 -0700390 /* make sure all memory stores are done before ringing doorbell */
391 smp_wmb();
392
393 /* Check if buffer allocation failed */
394 if (refill_rb_cnt)
395 nic->rb_alloc_fail = true;
396 else
397 nic->rb_alloc_fail = false;
398
399 /* Notify HW */
400 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
401 rbdr_idx, new_rb);
402next_rbdr:
 403 /* Re-enable RBDR interrupts only if buffer allocation succeeds */
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530404 if (!nic->rb_alloc_fail && rbdr->enable &&
405 netif_running(nic->pnicvf->netdev))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700406 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
407
408 if (rbdr_idx)
409 goto refill;
410}
411
412/* Alloc rcv buffers in non-atomic mode for better success */
413void nicvf_rbdr_work(struct work_struct *work)
414{
415 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
416
417 nicvf_refill_rbdr(nic, GFP_KERNEL);
418 if (nic->rb_alloc_fail)
419 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
420 else
421 nic->rb_work_scheduled = false;
422}
423
424/* In Softirq context, alloc rcv buffers in atomic mode */
425void nicvf_rbdr_task(unsigned long data)
426{
427 struct nicvf *nic = (struct nicvf *)data;
428
429 nicvf_refill_rbdr(nic, GFP_ATOMIC);
430 if (nic->rb_alloc_fail) {
431 nic->rb_work_scheduled = true;
432 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
433 }
434}
435
436/* Initialize completion queue */
437static int nicvf_init_cmp_queue(struct nicvf *nic,
438 struct cmp_queue *cq, int q_len)
439{
440 int err;
441
442 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
443 NICVF_CQ_BASE_ALIGN_BYTES);
444 if (err)
445 return err;
446
447 cq->desc = cq->dmem.base;
Sunil Gouthamb9687b42015-12-10 13:25:20 +0530448 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700449 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
450
451 return 0;
452}
453
454static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
455{
456 if (!cq)
457 return;
458 if (!cq->dmem.base)
459 return;
460
461 nicvf_free_q_desc_mem(nic, &cq->dmem);
462}
463
464/* Initialize transmit queue */
465static int nicvf_init_snd_queue(struct nicvf *nic,
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530466 struct snd_queue *sq, int q_len, int qidx)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700467{
468 int err;
469
470 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
471 NICVF_SQ_BASE_ALIGN_BYTES);
472 if (err)
473 return err;
474
475 sq->desc = sq->dmem.base;
Aleksey Makarov86ace692015-06-02 11:00:27 -0700476 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
Aleksey Makarovfa1a6c92015-06-02 11:00:26 -0700477 if (!sq->skbuff)
478 return -ENOMEM;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530479
Sunil Goutham4863dea2015-05-26 19:20:15 -0700480 sq->head = 0;
481 sq->tail = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700482 sq->thresh = SND_QUEUE_THRESH;
483
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530484 /* Check if this SQ is an XDP TX queue */
485 if (nic->sqs_mode)
486 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
487 if (qidx < nic->pnicvf->xdp_tx_queues) {
488 /* Alloc memory to save page pointers for XDP_TX */
489 sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
490 if (!sq->xdp_page)
491 return -ENOMEM;
492 sq->xdp_desc_cnt = 0;
493 sq->xdp_free_cnt = q_len - 1;
494 sq->is_xdp = true;
495 } else {
496 sq->xdp_page = NULL;
497 sq->xdp_desc_cnt = 0;
498 sq->xdp_free_cnt = 0;
499 sq->is_xdp = false;
500
501 atomic_set(&sq->free_cnt, q_len - 1);
502
503 /* Preallocate memory for TSO segment's header */
504 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
505 q_len * TSO_HEADER_SIZE,
506 &sq->tso_hdrs_phys,
507 GFP_KERNEL);
508 if (!sq->tso_hdrs)
509 return -ENOMEM;
510 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700511
512 return 0;
513}
514
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530515void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
516 int hdr_sqe, u8 subdesc_cnt)
517{
518 u8 idx;
519 struct sq_gather_subdesc *gather;
520
521 /* Unmap DMA mapped skb data buffers */
522 for (idx = 0; idx < subdesc_cnt; idx++) {
523 hdr_sqe++;
524 hdr_sqe &= (sq->dmem.q_len - 1);
525 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
526 /* HW will ensure data coherency, CPU sync not required */
527 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
528 gather->size, DMA_TO_DEVICE,
529 DMA_ATTR_SKIP_CPU_SYNC);
530 }
531}
532
Sunil Goutham4863dea2015-05-26 19:20:15 -0700533static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
534{
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530535 struct sk_buff *skb;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530536 struct page *page;
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530537 struct sq_hdr_subdesc *hdr;
538 struct sq_hdr_subdesc *tso_sqe;
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530539
Sunil Goutham4863dea2015-05-26 19:20:15 -0700540 if (!sq)
541 return;
542 if (!sq->dmem.base)
543 return;
544
545 if (sq->tso_hdrs)
Sunil Goutham143ceb02015-07-29 16:49:37 +0300546 dma_free_coherent(&nic->pdev->dev,
547 sq->dmem.q_len * TSO_HEADER_SIZE,
Sunil Goutham4863dea2015-05-26 19:20:15 -0700548 sq->tso_hdrs, sq->tso_hdrs_phys);
549
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530550 /* Free pending skbs in the queue */
551 smp_rmb();
552 while (sq->head != sq->tail) {
553 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530554 if (!skb || !sq->xdp_page)
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530555 goto next;
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530556
557 page = (struct page *)sq->xdp_page[sq->head];
558 if (!page)
559 goto next;
560 else
561 put_page(page);
562
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530563 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
564 /* Check for dummy descriptor used for HW TSO offload on 88xx */
565 if (hdr->dont_send) {
566 /* Get actual TSO descriptors and unmap them */
567 tso_sqe =
568 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
569 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
570 tso_sqe->subdesc_cnt);
571 } else {
572 nicvf_unmap_sndq_buffers(nic, sq, sq->head,
573 hdr->subdesc_cnt);
574 }
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530575 if (skb)
576 dev_kfree_skb_any(skb);
Sunil Goutham83abb7d2017-03-07 18:09:08 +0530577next:
Sunil Gouthamc94acf82016-11-15 17:38:29 +0530578 sq->head++;
579 sq->head &= (sq->dmem.q_len - 1);
580 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700581 kfree(sq->skbuff);
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530582 kfree(sq->xdp_page);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700583 nicvf_free_q_desc_mem(nic, &sq->dmem);
584}
585
586static void nicvf_reclaim_snd_queue(struct nicvf *nic,
587 struct queue_set *qs, int qidx)
588{
589 /* Disable send queue */
590 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
591 /* Check if SQ is stopped */
592 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
593 return;
594 /* Reset send queue */
595 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
596}
597
598static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
599 struct queue_set *qs, int qidx)
600{
601 union nic_mbx mbx = {};
602
603 /* Make sure all packets in the pipeline are written back into mem */
604 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
605 nicvf_send_msg_to_pf(nic, &mbx);
606}
607
608static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
609 struct queue_set *qs, int qidx)
610{
 611 /* Disable timer threshold (doesn't get reset upon CQ reset) */
612 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
613 /* Disable completion queue */
614 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
615 /* Reset completion queue */
616 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
617}
618
619static void nicvf_reclaim_rbdr(struct nicvf *nic,
620 struct rbdr *rbdr, int qidx)
621{
622 u64 tmp, fifo_state;
623 int timeout = 10;
624
 625 /* Save head and tail pointers for freeing up buffers */
626 rbdr->head = nicvf_queue_reg_read(nic,
627 NIC_QSET_RBDR_0_1_HEAD,
628 qidx) >> 3;
629 rbdr->tail = nicvf_queue_reg_read(nic,
630 NIC_QSET_RBDR_0_1_TAIL,
631 qidx) >> 3;
632
633 /* If RBDR FIFO is in 'FAIL' state then do a reset first
 634 * before reclaiming.
635 */
636 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
637 if (((fifo_state >> 62) & 0x03) == 0x3)
638 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
639 qidx, NICVF_RBDR_RESET);
640
641 /* Disable RBDR */
642 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
643 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
644 return;
645 while (1) {
646 tmp = nicvf_queue_reg_read(nic,
647 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
648 qidx);
649 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
650 break;
651 usleep_range(1000, 2000);
652 timeout--;
653 if (!timeout) {
654 netdev_err(nic->netdev,
655 "Failed polling on prefetch status\n");
656 return;
657 }
658 }
659 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
660 qidx, NICVF_RBDR_RESET);
661
662 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
663 return;
664 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
665 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
666 return;
667}
668
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300669void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
670{
671 u64 rq_cfg;
672 int sqs;
673
674 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
675
676 /* Enable first VLAN stripping */
677 if (features & NETIF_F_HW_VLAN_CTAG_RX)
678 rq_cfg |= (1ULL << 25);
679 else
680 rq_cfg &= ~(1ULL << 25);
681 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
682
683 /* Configure Secondary Qsets, if any */
684 for (sqs = 0; sqs < nic->sqs_count; sqs++)
685 if (nic->snicvf[sqs])
686 nicvf_queue_reg_write(nic->snicvf[sqs],
687 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
688}
689
Jerin Jacob3458c402016-08-12 16:51:39 +0530690static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
691{
692 union nic_mbx mbx = {};
693
Sunil Goutham964cb692016-11-15 17:38:16 +0530694 /* Reset all RQ/SQ and VF stats */
Jerin Jacob3458c402016-08-12 16:51:39 +0530695 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
Sunil Goutham964cb692016-11-15 17:38:16 +0530696 mbx.reset_stat.rx_stat_mask = 0x3FFF;
697 mbx.reset_stat.tx_stat_mask = 0x1F;
Jerin Jacob3458c402016-08-12 16:51:39 +0530698 mbx.reset_stat.rq_stat_mask = 0xFFFF;
Sunil Goutham964cb692016-11-15 17:38:16 +0530699 mbx.reset_stat.sq_stat_mask = 0xFFFF;
Jerin Jacob3458c402016-08-12 16:51:39 +0530700 nicvf_send_msg_to_pf(nic, &mbx);
701}
702
Sunil Goutham4863dea2015-05-26 19:20:15 -0700703/* Configures receive queue */
704static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
705 int qidx, bool enable)
706{
707 union nic_mbx mbx = {};
708 struct rcv_queue *rq;
709 struct rq_cfg rq_cfg;
710
711 rq = &qs->rq[qidx];
712 rq->enable = enable;
713
714 /* Disable receive queue */
715 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
716
717 if (!rq->enable) {
718 nicvf_reclaim_rcv_queue(nic, qs, qidx);
719 return;
720 }
721
722 rq->cq_qs = qs->vnic_id;
723 rq->cq_idx = qidx;
724 rq->start_rbdr_qs = qs->vnic_id;
725 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
726 rq->cont_rbdr_qs = qs->vnic_id;
727 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
 728 /* all writes of RBDR data to be loaded into L2 Cache as well */
729 rq->caching = 1;
730
731 /* Send a mailbox msg to PF to config RQ */
732 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
733 mbx.rq.qs_num = qs->vnic_id;
734 mbx.rq.rq_num = qidx;
735 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
736 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
737 (rq->cont_qs_rbdr_idx << 8) |
738 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
739 nicvf_send_msg_to_pf(nic, &mbx);
740
741 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530742 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
743 (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
744 (qs->vnic_id << 0);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700745 nicvf_send_msg_to_pf(nic, &mbx);
746
747 /* RQ drop config
748 * Enable CQ drop to reserve sufficient CQEs for all tx packets
749 */
750 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
Sunil Gouthamd5b2d7a2016-11-24 14:48:02 +0530751 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
752 (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
753 (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700754 nicvf_send_msg_to_pf(nic, &mbx);
755
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530756 if (!nic->sqs_mode && (qidx == 0)) {
Thanneeru Srinivasulu36fa35d2017-03-07 18:09:11 +0530757 /* Enable checking L3/L4 length and TCP/UDP checksums
758 * Also allow IPv6 pkts with zero UDP checksum.
759 */
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530760 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
Thanneeru Srinivasulu36fa35d2017-03-07 18:09:11 +0530761 (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
Sunil Gouthamaa2e2592015-08-30 12:29:13 +0300762 nicvf_config_vlan_stripping(nic, nic->netdev->features);
Sunil Gouthamcadcf952016-11-15 17:37:54 +0530763 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700764
765 /* Enable Receive queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200766 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700767 rq_cfg.ena = 1;
768 rq_cfg.tcp_ena = 0;
769 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
770}
771
772/* Configures completion queue */
773void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
774 int qidx, bool enable)
775{
776 struct cmp_queue *cq;
777 struct cq_cfg cq_cfg;
778
779 cq = &qs->cq[qidx];
780 cq->enable = enable;
781
782 if (!cq->enable) {
783 nicvf_reclaim_cmp_queue(nic, qs, qidx);
784 return;
785 }
786
787 /* Reset completion queue */
788 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
789
790 if (!cq->enable)
791 return;
792
793 spin_lock_init(&cq->lock);
794 /* Set completion queue base address */
795 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
796 qidx, (u64)(cq->dmem.phys_base));
797
798 /* Enable Completion queue */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200799 memset(&cq_cfg, 0, sizeof(struct cq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700800 cq_cfg.ena = 1;
801 cq_cfg.reset = 0;
802 cq_cfg.caching = 0;
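	/* qsize is encoded as log2(number of CQ entries / 1024) */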
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530803 cq_cfg.qsize = ilog2(qs->cq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700804 cq_cfg.avg_con = 0;
805 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
806
807 /* Set threshold value for interrupt generation */
808 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
809 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
Sunil Goutham006394a2015-12-02 15:36:15 +0530810 qidx, CMP_QUEUE_TIMER_THRESH);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700811}
812
813/* Configures transmit queue */
814static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
815 int qidx, bool enable)
816{
817 union nic_mbx mbx = {};
818 struct snd_queue *sq;
819 struct sq_cfg sq_cfg;
820
821 sq = &qs->sq[qidx];
822 sq->enable = enable;
823
824 if (!sq->enable) {
825 nicvf_reclaim_snd_queue(nic, qs, qidx);
826 return;
827 }
828
829 /* Reset send queue */
830 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
831
832 sq->cq_qs = qs->vnic_id;
833 sq->cq_idx = qidx;
834
835 /* Send a mailbox msg to PF to config SQ */
836 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
837 mbx.sq.qs_num = qs->vnic_id;
838 mbx.sq.sq_num = qidx;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300839 mbx.sq.sqs_mode = nic->sqs_mode;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700840 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
841 nicvf_send_msg_to_pf(nic, &mbx);
842
843 /* Set queue base address */
844 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
845 qidx, (u64)(sq->dmem.phys_base));
846
847 /* Enable send queue & set queue size */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200848 memset(&sq_cfg, 0, sizeof(struct sq_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700849 sq_cfg.ena = 1;
850 sq_cfg.reset = 0;
851 sq_cfg.ldwb = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530852 sq_cfg.qsize = ilog2(qs->sq_len >> 10);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700853 sq_cfg.tstmp_bgx_intf = 0;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +0530854 /* CQ's level at which HW will stop processing SQEs to avoid
855 * transmitting a pkt with no space in CQ to post CQE_TX.
856 */
857 sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700858 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
859
860 /* Set threshold value for interrupt generation */
861 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
862
863 /* Set queue:cpu affinity for better load distribution */
864 if (cpu_online(qidx)) {
865 cpumask_set_cpu(qidx, &sq->affinity_mask);
866 netif_set_xps_queue(nic->netdev,
867 &sq->affinity_mask, qidx);
868 }
869}
870
871/* Configures receive buffer descriptor ring */
872static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
873 int qidx, bool enable)
874{
875 struct rbdr *rbdr;
876 struct rbdr_cfg rbdr_cfg;
877
878 rbdr = &qs->rbdr[qidx];
879 nicvf_reclaim_rbdr(nic, rbdr, qidx);
880 if (!enable)
881 return;
882
883 /* Set descriptor base address */
884 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
885 qidx, (u64)(rbdr->dmem.phys_base));
886
887 /* Enable RBDR & set queue size */
888 /* Buffer size should be in multiples of 128 bytes */
xypron.glpk@gmx.de161de2c2016-05-09 00:46:18 +0200889 memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
Sunil Goutham4863dea2015-05-26 19:20:15 -0700890 rbdr_cfg.ena = 1;
891 rbdr_cfg.reset = 0;
892 rbdr_cfg.ldwb = 0;
893 rbdr_cfg.qsize = RBDR_SIZE;
894 rbdr_cfg.avg_con = 0;
895 rbdr_cfg.lines = rbdr->dma_size / 128;
896 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
897 qidx, *(u64 *)&rbdr_cfg);
898
899 /* Notify HW */
900 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
901 qidx, qs->rbdr_len - 1);
902
903 /* Set threshold value for interrupt generation */
904 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
905 qidx, rbdr->thresh - 1);
906}
907
908/* Requests PF to assign and enable Qset */
909void nicvf_qset_config(struct nicvf *nic, bool enable)
910{
911 union nic_mbx mbx = {};
912 struct queue_set *qs = nic->qs;
913 struct qs_cfg *qs_cfg;
914
915 if (!qs) {
916 netdev_warn(nic->netdev,
917 "Qset is still not allocated, don't init queues\n");
918 return;
919 }
920
921 qs->enable = enable;
922 qs->vnic_id = nic->vf_id;
923
924 /* Send a mailbox msg to PF to config Qset */
925 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
926 mbx.qs.num = qs->vnic_id;
Sunil Goutham92dc8762015-08-30 12:29:15 +0300927 mbx.qs.sqs_count = nic->sqs_count;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700928
929 mbx.qs.cfg = 0;
930 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
931 if (qs->enable) {
932 qs_cfg->ena = 1;
933#ifdef __BIG_ENDIAN
934 qs_cfg->be = 1;
935#endif
936 qs_cfg->vnic = qs->vnic_id;
937 }
938 nicvf_send_msg_to_pf(nic, &mbx);
939}
940
941static void nicvf_free_resources(struct nicvf *nic)
942{
943 int qidx;
944 struct queue_set *qs = nic->qs;
945
946 /* Free receive buffer descriptor ring */
947 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
948 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
949
950 /* Free completion queue */
951 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
952 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
953
954 /* Free send queue */
955 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
956 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
957}
958
959static int nicvf_alloc_resources(struct nicvf *nic)
960{
961 int qidx;
962 struct queue_set *qs = nic->qs;
963
964 /* Alloc receive buffer descriptor ring */
965 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
966 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
967 DMA_BUFFER_LEN))
968 goto alloc_fail;
969 }
970
971 /* Alloc send queue */
972 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
Sunil Goutham16f2bcc2017-05-02 18:36:56 +0530973 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
Sunil Goutham4863dea2015-05-26 19:20:15 -0700974 goto alloc_fail;
975 }
976
977 /* Alloc completion queue */
978 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
979 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
980 goto alloc_fail;
981 }
982
983 return 0;
984alloc_fail:
985 nicvf_free_resources(nic);
986 return -ENOMEM;
987}
988
989int nicvf_set_qset_resources(struct nicvf *nic)
990{
991 struct queue_set *qs;
992
993 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
994 if (!qs)
995 return -ENOMEM;
996 nic->qs = qs;
997
998 /* Set count of each queue */
Sunil Goutham3a397eb2016-08-12 16:51:27 +0530999 qs->rbdr_cnt = DEFAULT_RBDR_CNT;
1000 qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
1001 qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
1002 qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001003
1004 /* Set queue lengths */
1005 qs->rbdr_len = RCV_BUF_COUNT;
1006 qs->sq_len = SND_QUEUE_LEN;
1007 qs->cq_len = CMP_QUEUE_LEN;
Sunil Goutham92dc8762015-08-30 12:29:15 +03001008
1009 nic->rx_queues = qs->rq_cnt;
1010 nic->tx_queues = qs->sq_cnt;
Sunil Goutham05c773f2017-05-02 18:36:54 +05301011 nic->xdp_tx_queues = 0;
Sunil Goutham92dc8762015-08-30 12:29:15 +03001012
Sunil Goutham4863dea2015-05-26 19:20:15 -07001013 return 0;
1014}
1015
1016int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
1017{
1018 bool disable = false;
1019 struct queue_set *qs = nic->qs;
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +05301020 struct queue_set *pqs = nic->pnicvf->qs;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001021 int qidx;
1022
1023 if (!qs)
1024 return 0;
1025
Sunil Gouthamfff4ffd2017-01-25 17:36:23 +05301026 /* Take primary VF's queue lengths.
1027 * This is needed to take queue lengths set from ethtool
1028 * into consideration.
1029 */
1030 if (nic->sqs_mode && pqs) {
1031 qs->cq_len = pqs->cq_len;
1032 qs->sq_len = pqs->sq_len;
1033 }
1034
Sunil Goutham4863dea2015-05-26 19:20:15 -07001035 if (enable) {
1036 if (nicvf_alloc_resources(nic))
1037 return -ENOMEM;
1038
1039 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1040 nicvf_snd_queue_config(nic, qs, qidx, enable);
1041 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1042 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1043 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1044 nicvf_rbdr_config(nic, qs, qidx, enable);
1045 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1046 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1047 } else {
1048 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1049 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1050 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1051 nicvf_rbdr_config(nic, qs, qidx, disable);
1052 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1053 nicvf_snd_queue_config(nic, qs, qidx, disable);
1054 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1055 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1056
1057 nicvf_free_resources(nic);
1058 }
1059
Jerin Jacob3458c402016-08-12 16:51:39 +05301060 /* Reset RXQ's stats.
1061 * SQ's stats will get reset automatically once SQ is reset.
1062 */
1063 nicvf_reset_rcv_queue_stats(nic);
1064
Sunil Goutham4863dea2015-05-26 19:20:15 -07001065 return 0;
1066}
1067
1068/* Get a free desc from SQ
 1069 * returns descriptor pointer & descriptor number
1070 */
1071static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1072{
1073 int qentry;
1074
1075 qentry = sq->tail;
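	/* XDP TX descriptors are allocated and freed from the same
	 * NAPI context, so a plain counter is sufficient for them.
	 */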
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301076 if (!sq->is_xdp)
1077 atomic_sub(desc_cnt, &sq->free_cnt);
1078 else
1079 sq->xdp_free_cnt -= desc_cnt;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001080 sq->tail += desc_cnt;
1081 sq->tail &= (sq->dmem.q_len - 1);
1082
1083 return qentry;
1084}
1085
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301086/* Rollback to previous tail pointer when descriptors not used */
1087static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
1088 int qentry, int desc_cnt)
1089{
1090 sq->tail = qentry;
1091 atomic_add(desc_cnt, &sq->free_cnt);
1092}
1093
Sunil Goutham4863dea2015-05-26 19:20:15 -07001094/* Free descriptor back to SQ for future use */
1095void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1096{
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301097 if (!sq->is_xdp)
1098 atomic_add(desc_cnt, &sq->free_cnt);
1099 else
1100 sq->xdp_free_cnt += desc_cnt;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001101 sq->head += desc_cnt;
1102 sq->head &= (sq->dmem.q_len - 1);
1103}
1104
1105static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1106{
1107 qentry++;
1108 qentry &= (sq->dmem.q_len - 1);
1109 return qentry;
1110}
1111
1112void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1113{
1114 u64 sq_cfg;
1115
1116 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1117 sq_cfg |= NICVF_SQ_EN;
1118 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1119 /* Ring doorbell so that H/W restarts processing SQEs */
1120 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1121}
1122
1123void nicvf_sq_disable(struct nicvf *nic, int qidx)
1124{
1125 u64 sq_cfg;
1126
1127 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1128 sq_cfg &= ~NICVF_SQ_EN;
1129 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1130}
1131
1132void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
1133 int qidx)
1134{
1135 u64 head, tail;
1136 struct sk_buff *skb;
1137 struct nicvf *nic = netdev_priv(netdev);
1138 struct sq_hdr_subdesc *hdr;
1139
1140 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1141 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1142 while (sq->head != head) {
1143 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1144 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1145 nicvf_put_sq_desc(sq, 1);
1146 continue;
1147 }
1148 skb = (struct sk_buff *)sq->skbuff[sq->head];
Sunil Goutham143ceb02015-07-29 16:49:37 +03001149 if (skb)
1150 dev_kfree_skb_any(skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001151 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
1152 atomic64_add(hdr->tot_len,
1153 (atomic64_t *)&netdev->stats.tx_bytes);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001154 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1155 }
1156}
1157
Sunil Goutham16f2bcc2017-05-02 18:36:56 +05301158/* XDP Transmit APIs */
1159void nicvf_xdp_sq_doorbell(struct nicvf *nic,
1160 struct snd_queue *sq, int sq_num)
1161{
1162 if (!sq->xdp_desc_cnt)
1163 return;
1164
1165 /* make sure all memory stores are done before ringing doorbell */
1166 wmb();
1167
 1168 /* Inform HW to xmit the queued XDP descriptors */
1169 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1170 sq_num, sq->xdp_desc_cnt);
1171 sq->xdp_desc_cnt = 0;
1172}
1173
1174static inline void
1175nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1176 int subdesc_cnt, u64 data, int len)
1177{
1178 struct sq_hdr_subdesc *hdr;
1179
1180 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1181 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1182 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1183 hdr->subdesc_cnt = subdesc_cnt;
1184 hdr->tot_len = len;
1185 hdr->post_cqe = 1;
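	/* Save the page backing this buffer so it can be released
	 * once its transmission completes.
	 */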
1186 sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
1187}
1188
1189int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
1190 u64 bufaddr, u64 dma_addr, u16 len)
1191{
1192 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1193 int qentry;
1194
1195 if (subdesc_cnt > sq->xdp_free_cnt)
1196 return 0;
1197
1198 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1199
1200 nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
1201
1202 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1203 nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
1204
1205 sq->xdp_desc_cnt += subdesc_cnt;
1206
1207 return 1;
1208}
1209
Sunil Goutham4863dea2015-05-26 19:20:15 -07001210/* Calculate no of SQ subdescriptors needed to transmit all
1211 * segments of this TSO packet.
1212 * Taken from 'Tilera network driver' with a minor modification.
1213 */
1214static int nicvf_tso_count_subdescs(struct sk_buff *skb)
1215{
1216 struct skb_shared_info *sh = skb_shinfo(skb);
1217 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1218 unsigned int data_len = skb->len - sh_len;
1219 unsigned int p_len = sh->gso_size;
1220 long f_id = -1; /* id of the current fragment */
1221 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1222 long f_used = 0; /* bytes used from the current fragment */
1223 long n; /* size of the current piece of payload */
1224 int num_edescs = 0;
1225 int segment;
1226
1227 for (segment = 0; segment < sh->gso_segs; segment++) {
1228 unsigned int p_used = 0;
1229
1230 /* One edesc for header and for each piece of the payload. */
1231 for (num_edescs++; p_used < p_len; num_edescs++) {
1232 /* Advance as needed. */
1233 while (f_used >= f_size) {
1234 f_id++;
1235 f_size = skb_frag_size(&sh->frags[f_id]);
1236 f_used = 0;
1237 }
1238
1239 /* Use bytes from the current fragment. */
1240 n = p_len - p_used;
1241 if (n > f_size - f_used)
1242 n = f_size - f_used;
1243 f_used += n;
1244 p_used += n;
1245 }
1246
1247 /* The last segment may be less than gso_size. */
1248 data_len -= p_len;
1249 if (data_len < p_len)
1250 p_len = data_len;
1251 }
1252
 1253 /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
1254 return num_edescs + sh->gso_segs;
1255}
1256
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301257#define POST_CQE_DESC_COUNT 2
1258
Sunil Goutham4863dea2015-05-26 19:20:15 -07001259/* Get the number of SQ descriptors needed to xmit this skb */
1260static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
1261{
1262 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1263
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301264 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001265 subdesc_cnt = nicvf_tso_count_subdescs(skb);
1266 return subdesc_cnt;
1267 }
1268
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301269 /* Dummy descriptors to get TSO pkt completion notification */
1270 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
1271 subdesc_cnt += POST_CQE_DESC_COUNT;
1272
Sunil Goutham4863dea2015-05-26 19:20:15 -07001273 if (skb_shinfo(skb)->nr_frags)
1274 subdesc_cnt += skb_shinfo(skb)->nr_frags;
1275
1276 return subdesc_cnt;
1277}
1278
1279/* Add SQ HEADER subdescriptor.
1280 * First subdescriptor for every send descriptor.
1281 */
1282static inline void
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301283nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001284 int subdesc_cnt, struct sk_buff *skb, int len)
1285{
1286 int proto;
1287 struct sq_hdr_subdesc *hdr;
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301288 union {
1289 struct iphdr *v4;
1290 struct ipv6hdr *v6;
1291 unsigned char *hdr;
1292 } ip;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001293
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301294 ip.hdr = skb_network_header(skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001295 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001296 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1297 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301298
1299 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1300 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
1301 * segment transmitted on 88xx.
1302 */
1303 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
1304 } else {
1305 sq->skbuff[qentry] = (u64)skb;
1306 /* Enable notification via CQE after processing SQE */
1307 hdr->post_cqe = 1;
1308 /* No of subdescriptors following this */
1309 hdr->subdesc_cnt = subdesc_cnt;
1310 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001311 hdr->tot_len = len;
1312
1313 /* Offload checksum calculation to HW */
1314 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001315 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1316 hdr->l3_offset = skb_network_offset(skb);
1317 hdr->l4_offset = skb_transport_offset(skb);
1318
Thanneeru Srinivasulu3a9024f2017-04-06 16:12:26 +05301319 proto = (ip.v4->version == 4) ? ip.v4->protocol :
1320 ip.v6->nexthdr;
1321
Sunil Goutham4863dea2015-05-26 19:20:15 -07001322 switch (proto) {
1323 case IPPROTO_TCP:
1324 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1325 break;
1326 case IPPROTO_UDP:
1327 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1328 break;
1329 case IPPROTO_SCTP:
1330 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1331 break;
1332 }
1333 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301334
1335 if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1336 hdr->tso = 1;
1337 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
1338 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1339 /* For non-tunneled pkts, point this to L2 ethertype */
1340 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
Sunil Goutham964cb692016-11-15 17:38:16 +05301341 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301342 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001343}
1344
1345/* SQ GATHER subdescriptor
1346 * Must follow HDR descriptor
1347 */
1348static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1349 int size, u64 data)
1350{
1351 struct sq_gather_subdesc *gather;
1352
1353 qentry &= (sq->dmem.q_len - 1);
1354 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1355
1356 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1357 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
Sunil Goutham4b561c12015-07-29 16:49:36 +03001358 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001359 gather->size = size;
1360 gather->addr = data;
1361}
1362
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301363/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 1364 * packet so that a CQE is posted as a notification for transmission of
 1365 * the TSO packet.
1366 */
1367static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1368 int tso_sqe, struct sk_buff *skb)
1369{
1370 struct sq_imm_subdesc *imm;
1371 struct sq_hdr_subdesc *hdr;
1372
1373 sq->skbuff[qentry] = (u64)skb;
1374
1375 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1376 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1377 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1378 /* Enable notification via CQE after processing SQE */
1379 hdr->post_cqe = 1;
1380 /* There is no packet to transmit here */
1381 hdr->dont_send = 1;
1382 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1383 hdr->tot_len = 1;
1384 /* Actual TSO header SQE index, needed for cleanup */
1385 hdr->rsvd2 = tso_sqe;
1386
1387 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1388 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1389 memset(imm, 0, SND_QUEUE_DESC_SIZE);
1390 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1391 imm->len = 1;
1392}
1393
Sunil Goutham2c204c22016-09-23 14:42:28 +05301394static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1395 int sq_num, int desc_cnt)
1396{
1397 struct netdev_queue *txq;
1398
1399 txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1400 skb_get_queue_mapping(skb));
1401
1402 netdev_tx_sent_queue(txq, skb->len);
1403
1404 /* make sure all memory stores are done before ringing doorbell */
1405 smp_wmb();
1406
 1407 /* Inform HW to xmit the queued descriptors */
1408 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1409 sq_num, desc_cnt);
1410}
1411
Sunil Goutham4863dea2015-05-26 19:20:15 -07001412/* Segment a TSO packet into 'gso_size' segments and append
1413 * them to SQ for transfer
1414 */
1415static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
Sunil Goutham92dc8762015-08-30 12:29:15 +03001416 int sq_num, int qentry, struct sk_buff *skb)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001417{
1418 struct tso_t tso;
1419 int seg_subdescs = 0, desc_cnt = 0;
1420 int seg_len, total_len, data_left;
1421 int hdr_qentry = qentry;
1422 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1423
1424 tso_start(skb, &tso);
1425 total_len = skb->len - hdr_len;
1426 while (total_len > 0) {
1427 char *hdr;
1428
1429 /* Save Qentry for adding HDR_SUBDESC at the end */
1430 hdr_qentry = qentry;
1431
1432 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1433 total_len -= data_left;
1434
1435 /* Add segment's header */
1436 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1437 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1438 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1439 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1440 sq->tso_hdrs_phys +
1441 qentry * TSO_HEADER_SIZE);
 1442 /* HDR_SUBDESC + GATHER */
1443 seg_subdescs = 2;
1444 seg_len = hdr_len;
1445
1446 /* Add segment's payload fragments */
1447 while (data_left > 0) {
1448 int size;
1449
1450 size = min_t(int, tso.size, data_left);
1451
1452 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1453 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1454 virt_to_phys(tso.data));
1455 seg_subdescs++;
1456 seg_len += size;
1457
1458 data_left -= size;
1459 tso_build_data(skb, &tso, size);
1460 }
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301461 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001462 seg_subdescs - 1, skb, seg_len);
Sunil Goutham143ceb02015-07-29 16:49:37 +03001463 sq->skbuff[hdr_qentry] = (u64)NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001464 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1465
1466 desc_cnt += seg_subdescs;
1467 }
1468 /* Save SKB in the last segment for freeing */
1469 sq->skbuff[hdr_qentry] = (u64)skb;
1470
Sunil Goutham2c204c22016-09-23 14:42:28 +05301471 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001472
Sunil Goutham964cb692016-11-15 17:38:16 +05301473 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001474 return 1;
1475}
1476
1477/* Append an skb to a SQ for packet transfer. */
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301478int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1479 struct sk_buff *skb, u8 sq_num)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001480{
1481 int i, size;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301482 int subdesc_cnt, hdr_sqe = 0;
Sunil Gouthambd3ad7d2016-12-01 18:24:28 +05301483 int qentry;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301484 u64 dma_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001485
1486 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1487 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1488 goto append_fail;
1489
1490 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1491
 1492 /* Check if it's a TSO packet */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301493 if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
Sunil Goutham92dc8762015-08-30 12:29:15 +03001494 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001495
1496 /* Add SQ header subdesc */
Sunil Goutham40fb5f82015-12-10 13:25:19 +05301497 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1498 skb, skb->len);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301499 hdr_sqe = qentry;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001500
1501 /* Add SQ gather subdescs */
1502 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1503 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301504 /* HW will ensure data coherency, CPU sync not required */
1505 dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1506 offset_in_page(skb->data), size,
1507 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1508 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1509 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1510 return 0;
1511 }
1512
1513 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001514
1515 /* Check for scattered buffer */
1516 if (!skb_is_nonlinear(skb))
1517 goto doorbell;
1518
1519 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1520 const struct skb_frag_struct *frag;
1521
1522 frag = &skb_shinfo(skb)->frags[i];
1523
1524 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1525 size = skb_frag_size(frag);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301526 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1527 skb_frag_page(frag),
1528 frag->page_offset, size,
1529 DMA_TO_DEVICE,
1530 DMA_ATTR_SKIP_CPU_SYNC);
1531 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
 1532 /* Free the entire chain of mapped buffers;
 1533 * here 'i' = number of frags mapped + the skb->data mapping above
1534 */
1535 nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1536 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1537 return 0;
1538 }
1539 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001540 }
1541
1542doorbell:
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301543 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1544 qentry = nicvf_get_nxt_sqentry(sq, qentry);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301545 nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
Sunil Goutham7ceb8a12016-08-30 11:36:27 +05301546 }
1547
Sunil Goutham2c204c22016-09-23 14:42:28 +05301548 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001549
Sunil Goutham4863dea2015-05-26 19:20:15 -07001550 return 1;
1551
1552append_fail:
Sunil Goutham92dc8762015-08-30 12:29:15 +03001553 /* Use original PCI dev for debug log */
1554 nic = nic->pnicvf;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001555 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1556 return 0;
1557}
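
/* Illustrative sketch (not taken from the original source) of the
 * subdescriptor chain nicvf_sq_append_skb() builds for a two-fragment skb,
 * assuming 'qentry' is the first descriptor handed out by
 * nicvf_get_sq_desc():
 *
 *   qentry + 0: header subdesc  (nicvf_sq_add_hdr_subdesc, skb->len;
 *                                sq->skbuff[] keeps the skb for freeing)
 *   qentry + 1: gather subdesc  (skb->data, skb_headlen(skb) bytes)
 *   qentry + 2: gather subdesc  (frag 0)
 *   qentry + 3: gather subdesc  (frag 1)
 *   [+ 4]     : CQE subdesc, added only when nic->t88 && gso_size
 *
 * A return value of 1 means the doorbell was rung for 'subdesc_cnt'
 * entries; 0 means nothing was posted (out of descriptors or a DMA
 * mapping error) and any work already done was unwound via
 * nicvf_unmap_sndq_buffers()/nicvf_rollback_sq_desc().
 */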
1558
1559static inline unsigned frag_num(unsigned i)
1560{
1561#ifdef __BIG_ENDIAN
1562 return (i & ~3) + 3 - (i & 3);
1563#else
1564 return i;
1565#endif
1566}
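
/* Worked example of the frag_num() remapping above. Reversing groups of
 * four matches 16-bit rb_lens[] entries packed four to a 64-bit CQE word
 * being read on a big-endian CPU: (i & ~3) keeps the word, 3 - (i & 3)
 * reverses the lane within it.
 *
 *   i          : 0 1 2 3 4 5 6 7
 *   frag_num(i): 3 2 1 0 7 6 5 4   (big endian)
 *   frag_num(i): 0 1 2 3 4 5 6 7   (little endian)
 */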
1567
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301568static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
1569 u64 buf_addr, bool xdp)
1570{
1571 struct page *page = NULL;
1572 int len = RCV_FRAG_LEN;
1573
1574 if (xdp) {
1575 page = virt_to_page(phys_to_virt(buf_addr));
1576		/* Check if it's a recycled page; if not,
1577		 * unmap the DMA mapping.
1578		 *
1579		 * A recycled page holds an extra reference.
1580		 */
1581 if (page_ref_count(page) != 1)
1582 return;
Sunil Gouthame3d06ff2017-05-02 18:36:57 +05301583
1584 len += XDP_PACKET_HEADROOM;
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301585 /* Receive buffers in XDP mode are mapped from page start */
1586 dma_addr &= PAGE_MASK;
1587 }
1588 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
1589 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1590}
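
/* A small worked note on the unmap sizes used above (illustrative only):
 * the non-XDP case unmaps RCV_FRAG_LEN bytes at the address the CQE
 * reported.  In XDP mode the frame sits XDP_PACKET_HEADROOM into a page
 * that was mapped from its start, so the length grows by
 * XDP_PACKET_HEADROOM and 'dma_addr &= PAGE_MASK' walks the IOVA back to
 * the page boundary before unmapping; a recycled page
 * (page_ref_count() != 1) keeps its mapping and is left alone.
 */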
1591
Sunil Goutham4863dea2015-05-26 19:20:15 -07001592/* Returns SKB for a received packet */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301593struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
1594 struct cqe_rx_t *cqe_rx, bool xdp)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001595{
1596 int frag;
1597 int payload_len = 0;
1598 struct sk_buff *skb = NULL;
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301599 struct page *page;
1600 int offset;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001601 u16 *rb_lens = NULL;
1602 u64 *rb_ptrs = NULL;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301603 u64 phys_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001604
1605 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
Sunil Goutham02a72bd2016-08-12 16:51:28 +05301606	/* On all chips except 88xx pass1, CQE_RX2_S is added to
1607	 * CQE_RX at word6, so the buffer pointers move up by one word.
1608	 *
1609	 * Use the existing 'hw_tso' flag, which is set on all chips
1610	 * except 88xx pass1, instead of an additional cache line
1611	 * access (or miss) to read the PCI dev's revision.
1612	 */
1613 if (!nic->hw_tso)
1614 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1615 else
1616 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
Sunil Goutham4863dea2015-05-26 19:20:15 -07001617
Sunil Goutham4863dea2015-05-26 19:20:15 -07001618 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1619 payload_len = rb_lens[frag_num(frag)];
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301620 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1621 if (!phys_addr) {
1622 if (skb)
1623 dev_kfree_skb_any(skb);
1624 return NULL;
1625 }
1626
Sunil Goutham4863dea2015-05-26 19:20:15 -07001627 if (!frag) {
1628 /* First fragment */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301629 nicvf_unmap_rcv_buffer(nic,
1630 *rb_ptrs - cqe_rx->align_pad,
1631 phys_addr, xdp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001632 skb = nicvf_rb_ptr_to_skb(nic,
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301633 phys_addr - cqe_rx->align_pad,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001634 payload_len);
1635 if (!skb)
1636 return NULL;
1637 skb_reserve(skb, cqe_rx->align_pad);
1638 skb_put(skb, payload_len);
1639 } else {
1640 /* Add fragments */
Sunil Gouthamc56d91c2017-05-02 18:36:55 +05301641 nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301642 page = virt_to_page(phys_to_virt(phys_addr));
1643 offset = phys_to_virt(phys_addr) - page_address(page);
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301644 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1645 offset, payload_len, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001646 }
1647 /* Next buffer pointer */
1648 rb_ptrs++;
1649 }
1650 return skb;
1651}
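
/* Hedged sketch of the CQE_RX layout relied on above (word offsets are as
 * used by this function; q_struct.h has the authoritative definitions):
 *
 *   word 0-2 : parse/status fields (rb_cnt, align_pad, err_level,
 *              err_opcode, ...), exact positions not shown here
 *   word 3+  : rb_lens[]  - one u16 length per receive buffer, indexed
 *              through frag_num() to cope with endianness
 *   word 6   : rb_ptrs[]  - buffer IOVAs on 88xx pass1 (!nic->hw_tso)
 *   word 7   : rb_ptrs[]  - on all other chips, where CQE_RX2_S is
 *              inserted and shifts the pointers by one word
 *
 * Each IOVA is translated with nicvf_iova_to_phys() before the buffer is
 * turned into the skb head (first fragment) or attached as a page frag.
 */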
1652
Yury Norovb45ceb42015-12-07 10:30:32 +05301653static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001654{
1655 u64 reg_val;
1656
Sunil Goutham4863dea2015-05-26 19:20:15 -07001657 switch (int_type) {
1658 case NICVF_INTR_CQ:
1659 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1660 break;
1661 case NICVF_INTR_SQ:
1662 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1663 break;
1664 case NICVF_INTR_RBDR:
1665 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1666 break;
1667 case NICVF_INTR_PKT_DROP:
1668 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1669 break;
1670 case NICVF_INTR_TCP_TIMER:
1671 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1672 break;
1673 case NICVF_INTR_MBOX:
1674 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1675 break;
1676 case NICVF_INTR_QS_ERR:
Yury Norovb45ceb42015-12-07 10:30:32 +05301677 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001678 break;
1679 default:
Yury Norovb45ceb42015-12-07 10:30:32 +05301680 reg_val = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001681 }
1682
Yury Norovb45ceb42015-12-07 10:30:32 +05301683 return reg_val;
1684}
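
/* Worked example for the mask helper above: per-queue interrupt types
 * (CQ, SQ, RBDR) get one bit per queue index at their type's shift, e.g.
 * NICVF_INTR_SQ with q_idx = 2 yields (1ULL << 2) << NICVF_INTR_SQ_SHIFT.
 * The remaining types (PKT_DROP, TCP_TIMER, MBOX, QS_ERR) are single
 * fixed bits, and an unknown type returns 0 so the callers below can
 * bail out with a debug message instead of touching the registers.
 */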
1685
1686/* Enable interrupt */
1687void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1688{
1689 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1690
1691 if (!mask) {
1692 netdev_dbg(nic->netdev,
1693 "Failed to enable interrupt: unknown type\n");
1694 return;
1695 }
1696 nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1697 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1698}
1699
1700/* Disable interrupt */
1701void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1702{
1703 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1704
1705 if (!mask) {
1706 netdev_dbg(nic->netdev,
1707 "Failed to disable interrupt: unknown type\n");
1708 return;
1709 }
1710
1711 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1712}
1713
1714/* Clear interrupt */
1715void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1716{
1717 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1718
1719 if (!mask) {
1720 netdev_dbg(nic->netdev,
1721 "Failed to clear interrupt: unknown type\n");
1722 return;
1723 }
1724
1725 nicvf_reg_write(nic, NIC_VF_INT, mask);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001726}
1727
1728/* Check if interrupt is enabled */
1729int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1730{
Yury Norovb45ceb42015-12-07 10:30:32 +05301731 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1732	/* If interrupt type is unknown, we treat it as disabled. */
1733 if (!mask) {
1734 netdev_dbg(nic->netdev,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001735 "Failed to check interrupt enable: unknown type\n");
Yury Norovb45ceb42015-12-07 10:30:32 +05301736 return 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001737 }
1738
Yury Norovb45ceb42015-12-07 10:30:32 +05301739 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001740}
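
/* Illustrative usage of the interrupt helpers above, assuming the usual
 * NAPI-style flow in nicvf_main.c (a sketch, not copied from there):
 *
 *   nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);   // mask while polling
 *   ... process CQ entries ...
 *   nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);     // ack the pending bit
 *   nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);    // unmask again
 *
 * Enable ORs the mask into NIC_VF_ENA_W1S, disable writes it to
 * NIC_VF_ENA_W1C, and clear writes it to NIC_VF_INT.
 */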
1741
1742void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1743{
1744 struct rcv_queue *rq;
1745
1746#define GET_RQ_STATS(reg) \
1747 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1748 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1749
1750 rq = &nic->qs->rq[rq_idx];
1751 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1752 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1753}
1754
1755void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1756{
1757 struct snd_queue *sq;
1758
1759#define GET_SQ_STATS(reg) \
1760 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1761 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1762
1763 sq = &nic->qs->sq[sq_idx];
1764 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1765 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1766}
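
/* Worked example for the stats readers above, assuming RQ_SQ_STATS_OCTS
 * and RQ_SQ_STATS_PKTS are small counter indices (defined elsewhere in
 * this driver): the register address is built as
 *
 *   NIC_QSET_RQ_0_7_STAT_0_1 | (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)
 *
 * i.e. the queue number selects that queue's register block and each
 * 64-bit counter sits 8 bytes (reg << 3) further in; the same scheme is
 * used for the SQ counters with NIC_QSET_SQ_0_7_STAT_0_1.
 */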
1767
1768/* Check for errors in the receive completion queue entry */
Sunil Gouthamad2eceb2016-02-16 16:29:51 +05301769int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001770{
Sunil Goutham4863dea2015-05-26 19:20:15 -07001771 if (netif_msg_rx_err(nic))
1772 netdev_err(nic->netdev,
1773 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1774 nic->netdev->name,
1775 cqe_rx->err_level, cqe_rx->err_opcode);
1776
Sunil Goutham4863dea2015-05-26 19:20:15 -07001777 switch (cqe_rx->err_opcode) {
1778 case CQ_RX_ERROP_RE_PARTIAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301779 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001780 break;
1781 case CQ_RX_ERROP_RE_JABBER:
Sunil Goutham964cb692016-11-15 17:38:16 +05301782 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001783 break;
1784 case CQ_RX_ERROP_RE_FCS:
Sunil Goutham964cb692016-11-15 17:38:16 +05301785 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001786 break;
1787 case CQ_RX_ERROP_RE_RX_CTL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301788 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001789 break;
1790 case CQ_RX_ERROP_PREL2_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301791 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001792 break;
1793 case CQ_RX_ERROP_L2_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301794 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001795 break;
1796 case CQ_RX_ERROP_L2_OVERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301797 this_cpu_inc(nic->drv_stats->rx_oversize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001798 break;
1799 case CQ_RX_ERROP_L2_UNDERSIZE:
Sunil Goutham964cb692016-11-15 17:38:16 +05301800 this_cpu_inc(nic->drv_stats->rx_undersize);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001801 break;
1802 case CQ_RX_ERROP_L2_LENMISM:
Sunil Goutham964cb692016-11-15 17:38:16 +05301803 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001804 break;
1805 case CQ_RX_ERROP_L2_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301806 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001807 break;
1808 case CQ_RX_ERROP_IP_NOT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301809 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001810 break;
1811 case CQ_RX_ERROP_IP_CSUM_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301812 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001813 break;
1814 case CQ_RX_ERROP_IP_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301815 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001816 break;
1817 case CQ_RX_ERROP_IP_MALD:
Sunil Goutham964cb692016-11-15 17:38:16 +05301818 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001819 break;
1820 case CQ_RX_ERROP_IP_HOP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301821 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001822 break;
1823 case CQ_RX_ERROP_L3_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301824 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001825 break;
1826 case CQ_RX_ERROP_L4_MAL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301827 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001828 break;
1829 case CQ_RX_ERROP_L4_CHK:
Sunil Goutham964cb692016-11-15 17:38:16 +05301830 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001831 break;
1832 case CQ_RX_ERROP_UDP_LEN:
Sunil Goutham964cb692016-11-15 17:38:16 +05301833 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001834 break;
1835 case CQ_RX_ERROP_L4_PORT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301836 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001837 break;
1838 case CQ_RX_ERROP_TCP_FLAG:
Sunil Goutham964cb692016-11-15 17:38:16 +05301839 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001840 break;
1841 case CQ_RX_ERROP_TCP_OFFSET:
Sunil Goutham964cb692016-11-15 17:38:16 +05301842 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001843 break;
1844 case CQ_RX_ERROP_L4_PCLP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301845 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001846 break;
1847 case CQ_RX_ERROP_RBDR_TRUNC:
Sunil Goutham964cb692016-11-15 17:38:16 +05301848 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001849 break;
1850 }
1851
1852 return 1;
1853}
1854
1855/* Check for errors in the send completion queue entry */
Sunil Goutham964cb692016-11-15 17:38:16 +05301856int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001857{
Sunil Goutham4863dea2015-05-26 19:20:15 -07001858 switch (cqe_tx->send_status) {
Sunil Goutham4863dea2015-05-26 19:20:15 -07001859 case CQ_TX_ERROP_DESC_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301860 this_cpu_inc(nic->drv_stats->tx_desc_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001861 break;
1862 case CQ_TX_ERROP_HDR_CONS_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301863 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001864 break;
1865 case CQ_TX_ERROP_SUBDC_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301866 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001867 break;
Sunil Goutham712c3182016-11-15 17:37:36 +05301868 case CQ_TX_ERROP_MAX_SIZE_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301869 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
Sunil Goutham712c3182016-11-15 17:37:36 +05301870 break;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001871 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301872 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001873 break;
1874 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301875 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001876 break;
1877 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
Sunil Goutham964cb692016-11-15 17:38:16 +05301878 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001879 break;
1880 case CQ_TX_ERROP_LOCK_VIOL:
Sunil Goutham964cb692016-11-15 17:38:16 +05301881 this_cpu_inc(nic->drv_stats->tx_lock_viol);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001882 break;
1883 case CQ_TX_ERROP_DATA_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301884 this_cpu_inc(nic->drv_stats->tx_data_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001885 break;
1886 case CQ_TX_ERROP_TSTMP_CONFLICT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301887 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001888 break;
1889 case CQ_TX_ERROP_TSTMP_TIMEOUT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301890 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001891 break;
1892 case CQ_TX_ERROP_MEM_FAULT:
Sunil Goutham964cb692016-11-15 17:38:16 +05301893 this_cpu_inc(nic->drv_stats->tx_mem_fault);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001894 break;
1895 case CQ_TX_ERROP_CK_OVERLAP:
Sunil Goutham964cb692016-11-15 17:38:16 +05301896 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001897 break;
1898 case CQ_TX_ERROP_CK_OFLOW:
Sunil Goutham964cb692016-11-15 17:38:16 +05301899 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001900 break;
1901 }
1902
1903 return 1;
1904}
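
/* Note on the error accounting above: both CQE error paths bump lockless
 * per-CPU counters via this_cpu_inc() on nic->drv_stats, which keeps the
 * completion path free of atomics; the per-CPU values are presumably
 * summed by the driver's stats/ethtool reporting when they are read out.
 * Both helpers return 1 to tell the caller that an errored CQE was seen.
 */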