/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
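	/* 'align_bytes' extra bytes are allocated so that the descriptor
	 * region can be shifted up to an aligned boundary below without
	 * running past the end of the DMA allocation.
	 */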
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate a new page or recycle one if possible
 *
 * We cannot optimize dma mapping here, since
 * 1. It's only one RBDR ring for 8 Rx queues.
 * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
 *    and not idx into RBDR ring, so can't refer to saved info.
 * 3. There are multiple receive buffers per page
 */
static struct pgcache *nicvf_alloc_page(struct nicvf *nic,
					struct rbdr *rbdr, gfp_t gfp)
{
	struct page *page = NULL;
	struct pgcache *pgcache, *next;

	/* Check if page is already allocated */
	pgcache = &rbdr->pgcache[rbdr->pgidx];
	page = pgcache->page;
	/* Check if page can be recycled */
	if (page && (page_ref_count(page) != 1))
		page = NULL;

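	/* No recyclable page: allocate a fresh order-0 page. If the page
	 * cache still has room, remember it there for future recycling;
	 * otherwise hand the page straight to the caller via nic->rb_page.
	 */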
	if (!page) {
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
		if (!page)
			return NULL;

		this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);

		/* Check for space */
		if (rbdr->pgalloc >= rbdr->pgcnt) {
			/* Page can still be used */
			nic->rb_page = page;
			return NULL;
		}

		/* Save the page in page cache */
		pgcache->page = page;
		rbdr->pgalloc++;
	}

	/* Take extra page reference for recycling */
	page_ref_add(page, 1);

	rbdr->pgidx++;
	rbdr->pgidx &= (rbdr->pgcnt - 1);

	/* Prefetch refcount of next page in page cache */
	next = &rbdr->pgcache[rbdr->pgidx];
	page = next->page;
	if (page)
		prefetch(&page->_refcount);

	return pgcache;
}

/* Allocate buffer for packet reception */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
					 gfp_t gfp, u32 buf_len, u64 **rbuf)
{
	struct pgcache *pgcache = NULL;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Get new page, either recycled or new one */
	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
	if (!pgcache && !nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}

	nic->rb_page_offset = 0;
	/* Check if it's recycled */
	if (pgcache)
		nic->rb_page = pgcache->page;
ret:
	/* HW will ensure data coherency, CPU sync not required */
	*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC));
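	/* If mapping failed and no buffer has been carved out of this page
	 * yet (offset still 0), it is safe to free the page here; otherwise
	 * earlier buffers from the same page may still be in flight.
	 */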
	if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
		if (!nic->rb_page_offset)
			__free_pages(nic->rb_page, 0);
		nic->rb_page = NULL;
		return -ENOMEM;
	}
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;

	/* Initialize page recycling stuff.
	 *
	 * Can't use a single buffer per page, especially with 64K pages.
	 * On embedded platforms, i.e. 81xx/83xx, available memory itself
	 * is low and the minimum RBDR ring size is 8K, which takes away
	 * lots of memory.
	 */
	rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
	rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
				rbdr->pgcnt, GFP_KERNEL);
	if (!rbdr->pgcache)
		return -ENOMEM;
	rbdr->pgidx = 0;
	rbdr->pgalloc = 0;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
					     RCV_FRAG_LEN, &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}

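		/* The RBDR entry holds the buffer address right-shifted by
		 * NICVF_RCV_BUF_ALIGN; the dropped low bits are presumably
		 * zero since buffers are aligned to that boundary.
		 */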
		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr, phys_addr;
	struct pgcache *pgcache;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (phys_addr)
			put_page(virt_to_page(phys_to_virt(phys_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (phys_addr)
		put_page(virt_to_page(phys_to_virt(phys_addr)));

	/* Sync page cache info */
	smp_rmb();

	/* Release additional page references held for recycling */
	head = 0;
	while (head < rbdr->pgcnt) {
		pgcache = &rbdr->pgcache[head];
		if (pgcache->page && page_ref_count(pgcache->page) != 0)
			put_page(pgcache->page);
		head++;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with at most ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

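	/* Note that at most ring size minus one buffers are ever posted;
	 * keeping one slot unused presumably lets the hardware distinguish
	 * a completely full ring from an empty one.
	 */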
	/* Sync page cache info */
	smp_rmb();

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable &&
	    netif_running(nic->pnicvf->netdev))
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt)
{
	u8 idx;
	struct sq_gather_subdesc *gather;

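	/* Gather subdescriptors immediately follow the header subdescriptor
	 * at 'hdr_sqe'; walk 'subdesc_cnt' of them and unmap each buffer.
	 */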
	/* Unmap DMA mapped skb data buffers */
	for (idx = 0; idx < subdesc_cnt; idx++) {
		hdr_sqe++;
		hdr_sqe &= (sq->dmem.q_len - 1);
		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
		/* HW will ensure data coherency, CPU sync not required */
		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
				     gather->size, DMA_TO_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	/* Free pending skbs in the queue */
	smp_rmb();
	while (sq->head != sq->tail) {
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (!skb)
			goto next;
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and unmap them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
						 hdr->subdesc_cnt);
		}
		dev_kfree_skb_any(skb);
next:
		sq->head++;
		sq->head &= (sq->dmem.q_len - 1);
	}
	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
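	/* The prefetch status register appears to report two pointers in its
	 * low and high 32-bit halves; wait until they match before resetting.
	 */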
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RQ/SQ and VF stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;
	mbx.reset_stat.tx_stat_mask = 0x1F;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	mbx.reset_stat.sq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
		     (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	if (!nic->sqs_mode && (qidx == 0)) {
		/* Enable checking L3/L4 length and TCP/UDP checksums.
		 * Also allow IPv6 pkts with zero UDP checksum.
		 */
		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
		nicvf_config_vlan_stripping(nic, nic->netdev->features);
	}

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
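	/* qsize appears to encode the ring length as log2(entries / 1K) */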
	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
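	/* 'lines' gives the per-buffer size in 128-byte units, which seems
	 * to be the granularity the hardware expects here.
	 */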
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	struct queue_set *pqs = nic->pnicvf->qs;
	int qidx;

	if (!qs)
		return 0;

	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

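	/* Reserve 'desc_cnt' consecutive descriptors by advancing the tail;
	 * q_len is a power of two, so the mask below wraps the ring index.
	 */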
	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;

	ip.hdr = skb_network_header(skb);
	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = (ip.v4->version == 4) ? ip.v4->protocol :
			ip.v6->nexthdr;

		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * the TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}

static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
				     int sq_num, int desc_cnt)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
				  skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
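	/* Each software-built segment is emitted as one HDR subdescriptor
	 * followed by a GATHER for the rebuilt header (from sq->tso_hdrs)
	 * and one GATHER per payload piece.
	 */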
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num)
{
	int i, size;
	int subdesc_cnt, hdr_sqe = 0;
	int qentry;
	u64 dma_addr;

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	hdr_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	/* HW will ensure data coherency, CPU sync not required */
	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
				      offset_in_page(skb->data), size,
				      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
		return 0;
	}

	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
					      skb_frag_page(frag),
					      frag->page_offset, size,
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
			/* Free entire chain of mapped buffers
			 * here 'i' = frags mapped + above mapped skb->data
			 */
			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
			return 0;
		}
		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
	}

	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);

	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

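/* rb_lens in the CQE is an array of 16-bit buffer lengths packed into
 * 64-bit words; on big-endian hosts the four u16 entries within each word
 * appear in reverse order, so frag_num() presumably compensates by
 * reversing the index within each group of four.
 */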
1453static inline unsigned frag_num(unsigned i)
1454{
1455#ifdef __BIG_ENDIAN
1456 return (i & ~3) + 3 - (i & 3);
1457#else
1458 return i;
1459#endif
1460}
1461
1462/* Returns SKB for a received packet */
1463struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1464{
1465 int frag;
1466 int payload_len = 0;
1467 struct sk_buff *skb = NULL;
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301468 struct page *page;
1469 int offset;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001470 u16 *rb_lens = NULL;
1471 u64 *rb_ptrs = NULL;
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301472 u64 phys_addr;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001473
1474 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
Sunil Goutham02a72bd2016-08-12 16:51:28 +05301475 /* Except 88xx pass1 on all other chips CQE_RX2_S is added to
1476 * CQE_RX at word6, hence buffer pointers move by word
1477 *
1478 * Use existing 'hw_tso' flag which will be set for all chips
1479 * except 88xx pass1 instead of a additional cache line
1480 * access (or miss) by using pci dev's revision.
1481 */
1482 if (!nic->hw_tso)
1483 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1484 else
1485 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
Sunil Goutham4863dea2015-05-26 19:20:15 -07001486
Sunil Goutham4863dea2015-05-26 19:20:15 -07001487 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1488 payload_len = rb_lens[frag_num(frag)];
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301489 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1490 if (!phys_addr) {
1491 if (skb)
1492 dev_kfree_skb_any(skb);
1493 return NULL;
1494 }
1495
Sunil Goutham4863dea2015-05-26 19:20:15 -07001496 if (!frag) {
1497 /* First fragment */
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301498 dma_unmap_page_attrs(&nic->pdev->dev,
1499 *rb_ptrs - cqe_rx->align_pad,
1500 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1501 DMA_ATTR_SKIP_CPU_SYNC);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001502 skb = nicvf_rb_ptr_to_skb(nic,
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301503 phys_addr - cqe_rx->align_pad,
Sunil Goutham4863dea2015-05-26 19:20:15 -07001504 payload_len);
1505 if (!skb)
1506 return NULL;
1507 skb_reserve(skb, cqe_rx->align_pad);
1508 skb_put(skb, payload_len);
1509 } else {
1510 /* Add fragments */
Sunil Goutham83abb7d2017-03-07 18:09:08 +05301511 dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
1512 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1513 DMA_ATTR_SKIP_CPU_SYNC);
1514 page = virt_to_page(phys_to_virt(phys_addr));
1515 offset = phys_to_virt(phys_addr) - page_address(page);
Sunil Gouthama8671ac2016-08-12 16:51:37 +05301516 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1517 offset, payload_len, RCV_FRAG_LEN);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001518 }
1519 /* Next buffer pointer */
1520 rb_ptrs++;
1521 }
1522 return skb;
1523}
1524
Yury Norovb45ceb42015-12-07 10:30:32 +05301525static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001526{
1527 u64 reg_val;
1528
Sunil Goutham4863dea2015-05-26 19:20:15 -07001529 switch (int_type) {
1530 case NICVF_INTR_CQ:
1531 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1532 break;
1533 case NICVF_INTR_SQ:
1534 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1535 break;
1536 case NICVF_INTR_RBDR:
1537 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1538 break;
1539 case NICVF_INTR_PKT_DROP:
1540 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1541 break;
1542 case NICVF_INTR_TCP_TIMER:
1543 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1544 break;
1545 case NICVF_INTR_MBOX:
1546 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1547 break;
1548 case NICVF_INTR_QS_ERR:
Yury Norovb45ceb42015-12-07 10:30:32 +05301549 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001550 break;
1551 default:
Yury Norovb45ceb42015-12-07 10:30:32 +05301552 reg_val = 0;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001553 }
1554
Yury Norovb45ceb42015-12-07 10:30:32 +05301555 return reg_val;
1556}
1557
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* If the interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

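/* RQ/SQ statistics are read from per-queue 64-bit counter registers; the
 * register address combines the stats block base, the queue index shifted
 * by NIC_Q_NUM_SHIFT and the stat selector scaled to an 8-byte offset
 * (reg << 3).
 */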
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

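/* The two CQE error handlers below fold each hardware error opcode into a
 * per-CPU counter in nic->drv_stats and return non-zero so the caller can
 * treat the entry as bad.
 */
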
/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
		break;
	case CQ_RX_ERROP_RE_JABBER:
		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
		break;
	case CQ_RX_ERROP_RE_FCS:
		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
		break;
	case CQ_RX_ERROP_L2_MAL:
		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		this_cpu_inc(nic->drv_stats->rx_oversize);
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		this_cpu_inc(nic->drv_stats->rx_undersize);
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
		break;
	case CQ_RX_ERROP_L2_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
		break;
	case CQ_RX_ERROP_IP_NOT:
		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
		break;
	case CQ_RX_ERROP_IP_MAL:
		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
		break;
	case CQ_RX_ERROP_IP_MALD:
		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
		break;
	case CQ_RX_ERROP_IP_HOP:
		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
		break;
	case CQ_RX_ERROP_L3_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
		break;
	case CQ_RX_ERROP_L4_MAL:
		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
		break;
	case CQ_RX_ERROP_L4_CHK:
		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
		break;
	case CQ_RX_ERROP_UDP_LEN:
		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
		break;
	case CQ_RX_ERROP_L4_PORT:
		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
		break;
	case CQ_RX_ERROP_L4_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		this_cpu_inc(nic->drv_stats->tx_desc_fault);
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
		break;
	case CQ_TX_ERROP_MAX_SIZE_VIOL:
		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		this_cpu_inc(nic->drv_stats->tx_lock_viol);
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		this_cpu_inc(nic->drv_stats->tx_data_fault);
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		this_cpu_inc(nic->drv_stats->tx_mem_fault);
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
		break;
	}

	return 1;
}