/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 92

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57304_VF,
	BCM57404_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == BCM57304_VF || idx == BCM57404_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

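/* TX buffer length hints for the hardware, indexed by packet length in
 * 512-byte units (length >> 9 in bnxt_start_xmit()).
 */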
static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

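	/* When the TX ring is completely idle and the packet fits within
	 * tx_push_thresh bytes, the BDs and the packet data are written
	 * directly through the push doorbell instead of being DMAed.
	 */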
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_bd *push = txr->tx_push;
		struct tx_bd *tx_push = &push->txbd1;
		struct tx_bd_ext *tx_push1 = &push->txbd2;
		void *pdata = tx_push1 + 1;
		int j;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		memcpy(txbd, tx_push, sizeof(*txbd));
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		netdev_tx_sent_queue(txq, skb->len);

		__iowrite64_copy(txr->tx_doorbell, push,
				 (length + sizeof(*push) + 8) / 8);

		tx_buf->is_push = 1;

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = bnapi->index;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

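/* RX buffers are plain kmalloc()'ed blocks of bp->rx_buf_size bytes.  The
 * region starting at BNXT_RX_DMA_OFFSET is mapped for DMA so the buffer
 * can later be handed to build_skb() without copying.
 */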
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

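/* Copy a small received packet into a freshly allocated skb so the
 * original RX buffer can be reused in place by the caller.
 */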
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

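/* A TPA_START completion begins a hardware-aggregated (LRO/GRO) flow.
 * The buffer posted at the completion's opaque index is moved into the
 * per-flow rx_tpa slot, and the slot's previous buffer is recycled back
 * onto the RX ring so the producer side stays filled.
 */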
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

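/* Fix up a hardware-coalesced (TPA) packet so it can be handed to
 * tcp_gro_complete(): set the segment count, gso_size and gso_type, and
 * recompute the TCP pseudo-header checksum.
 */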
static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int payload_off, tcp_opt_len = 0;
	int len, nw_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	if (TPA_END_GRO_TS(tpa_end))
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		netdev_features_t features = skb->dev->features;
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD)) {
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       tpa_info->metadata &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1 - 1 packet successfully received
 * 0 - successful TPA_START, packet not completed yet
 * -EBUSY - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			skb_mark_napi_id(skb, &bnapi->napi);
			if (bnxt_busy_polling(bnapi))
				netif_receive_skb(skb);
			else
				napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (rxcmp1->rx_cmp_flags2 &
	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
		netdev_features_t features = skb->dev->features;
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD))
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       meta_data &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
	if (bnxt_busy_polling(bnapi))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;
	default:
		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
			   event_id);
		break;
	}
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
				(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

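/* Service the completion ring: TX completions, RX packets and firmware
 * (HWRM) events all arrive on the same ring.  Processing stops when the
 * NAPI budget is consumed or no more valid entries are found.
 */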
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	bool rx_event = false;
	bool agg_event = false;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			rx_event = true;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnxt_tx_int(bp, bnapi, tx_pkts);

	if (rx_event) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		if (agg_event) {
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (!bnxt_lock_napi(bnapi))
		return budget;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			napi_complete(napi);
			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
			break;
		}
	}
	mmiowb();
	bnxt_unlock_napi(bnapi);
	return work_done;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int bnxt_busy_poll(struct napi_struct *napi)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int rx_work, budget = 4;

	if (atomic_read(&bp->intr_sem) != 0)
		return LL_FLUSH_FAILED;

	if (!bnxt_lock_poll(bnapi))
		return LL_FLUSH_BUSY;

	rx_work = bnxt_poll_work(bp, bnapi, budget);

	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);

	bnxt_unlock_poll(bnapi);
	return rx_work;
}
#endif

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j = NEXT_TX(j)) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[j];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single(
					&pdev->dev,
					dma_unmap_addr(tpa_info, mapping),
					bp->rx_buf_use_size,
					PCI_DMA_FROMDEVICE);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(rx_agg_buf, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}

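/* Each ring is built from coherent DMA pages; when more than one page is
 * needed, an additional coherent table is allocated and filled with the
 * pages' DMA addresses.  vmem holds the ring's software state.
 */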
1611static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1612{
1613 int i;
1614 struct pci_dev *pdev = bp->pdev;
1615
1616 if (ring->nr_pages > 1) {
1617 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1618 ring->nr_pages * 8,
1619 &ring->pg_tbl_map,
1620 GFP_KERNEL);
1621 if (!ring->pg_tbl)
1622 return -ENOMEM;
1623 }
1624
1625 for (i = 0; i < ring->nr_pages; i++) {
1626 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1627 ring->page_size,
1628 &ring->dma_arr[i],
1629 GFP_KERNEL);
1630 if (!ring->pg_arr[i])
1631 return -ENOMEM;
1632
1633 if (ring->nr_pages > 1)
1634 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1635 }
1636
1637 if (ring->vmem_size) {
1638 *ring->vmem = vzalloc(ring->vmem_size);
1639 if (!(*ring->vmem))
1640 return -ENOMEM;
1641 }
1642 return 0;
1643}
1644
1645static void bnxt_free_rx_rings(struct bnxt *bp)
1646{
1647 int i;
1648
Michael Chanb6ab4b02016-01-02 23:44:59 -05001649 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001650 return;
1651
1652 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001653 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001654 struct bnxt_ring_struct *ring;
1655
Michael Chanc0c050c2015-10-22 16:01:17 -04001656 kfree(rxr->rx_tpa);
1657 rxr->rx_tpa = NULL;
1658
1659 kfree(rxr->rx_agg_bmap);
1660 rxr->rx_agg_bmap = NULL;
1661
1662 ring = &rxr->rx_ring_struct;
1663 bnxt_free_ring(bp, ring);
1664
1665 ring = &rxr->rx_agg_ring_struct;
1666 bnxt_free_ring(bp, ring);
1667 }
1668}
1669
1670static int bnxt_alloc_rx_rings(struct bnxt *bp)
1671{
1672 int i, rc, agg_rings = 0, tpa_rings = 0;
1673
Michael Chanb6ab4b02016-01-02 23:44:59 -05001674 if (!bp->rx_ring)
1675 return -ENOMEM;
1676
Michael Chanc0c050c2015-10-22 16:01:17 -04001677 if (bp->flags & BNXT_FLAG_AGG_RINGS)
1678 agg_rings = 1;
1679
1680 if (bp->flags & BNXT_FLAG_TPA)
1681 tpa_rings = 1;
1682
1683 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001684 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001685 struct bnxt_ring_struct *ring;
1686
Michael Chanc0c050c2015-10-22 16:01:17 -04001687 ring = &rxr->rx_ring_struct;
1688
1689 rc = bnxt_alloc_ring(bp, ring);
1690 if (rc)
1691 return rc;
1692
1693 if (agg_rings) {
1694 u16 mem_size;
1695
1696 ring = &rxr->rx_agg_ring_struct;
1697 rc = bnxt_alloc_ring(bp, ring);
1698 if (rc)
1699 return rc;
1700
1701 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1702 mem_size = rxr->rx_agg_bmap_size / 8;
1703 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1704 if (!rxr->rx_agg_bmap)
1705 return -ENOMEM;
1706
1707 if (tpa_rings) {
1708 rxr->rx_tpa = kcalloc(MAX_TPA,
1709 sizeof(struct bnxt_tpa_info),
1710 GFP_KERNEL);
1711 if (!rxr->rx_tpa)
1712 return -ENOMEM;
1713 }
1714 }
1715 }
1716 return 0;
1717}
1718
1719static void bnxt_free_tx_rings(struct bnxt *bp)
1720{
1721 int i;
1722 struct pci_dev *pdev = bp->pdev;
1723
Michael Chanb6ab4b02016-01-02 23:44:59 -05001724 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001725 return;
1726
1727 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001728 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001729 struct bnxt_ring_struct *ring;
1730
Michael Chanc0c050c2015-10-22 16:01:17 -04001731 if (txr->tx_push) {
1732 dma_free_coherent(&pdev->dev, bp->tx_push_size,
1733 txr->tx_push, txr->tx_push_mapping);
1734 txr->tx_push = NULL;
1735 }
1736
1737 ring = &txr->tx_ring_struct;
1738
1739 bnxt_free_ring(bp, ring);
1740 }
1741}
1742
1743static int bnxt_alloc_tx_rings(struct bnxt *bp)
1744{
1745 int i, j, rc;
1746 struct pci_dev *pdev = bp->pdev;
1747
1748 bp->tx_push_size = 0;
1749 if (bp->tx_push_thresh) {
1750 int push_size;
1751
1752 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1753 bp->tx_push_thresh);
1754
1755 if (push_size > 128) {
1756 push_size = 0;
1757 bp->tx_push_thresh = 0;
1758 }
1759
1760 bp->tx_push_size = push_size;
1761 }
1762
1763 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001764 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001765 struct bnxt_ring_struct *ring;
1766
Michael Chanc0c050c2015-10-22 16:01:17 -04001767 ring = &txr->tx_ring_struct;
1768
1769 rc = bnxt_alloc_ring(bp, ring);
1770 if (rc)
1771 return rc;
1772
1773 if (bp->tx_push_size) {
1774 struct tx_bd *txbd;
1775 dma_addr_t mapping;
1776
1777 /* One pre-allocated DMA buffer to backup
1778 * TX push operation
1779 */
1780 txr->tx_push = dma_alloc_coherent(&pdev->dev,
1781 bp->tx_push_size,
1782 &txr->tx_push_mapping,
1783 GFP_KERNEL);
1784
1785 if (!txr->tx_push)
1786 return -ENOMEM;
1787
1788 txbd = &txr->tx_push->txbd1;
1789
1790 mapping = txr->tx_push_mapping +
1791 sizeof(struct tx_push_bd);
1792 txbd->tx_bd_haddr = cpu_to_le64(mapping);
1793
1794 memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
1795 }
1796 ring->queue_id = bp->q_info[j].queue_id;
1797 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1798 j++;
1799 }
1800 return 0;
1801}
1802
1803static void bnxt_free_cp_rings(struct bnxt *bp)
1804{
1805 int i;
1806
1807 if (!bp->bnapi)
1808 return;
1809
1810 for (i = 0; i < bp->cp_nr_rings; i++) {
1811 struct bnxt_napi *bnapi = bp->bnapi[i];
1812 struct bnxt_cp_ring_info *cpr;
1813 struct bnxt_ring_struct *ring;
1814
1815 if (!bnapi)
1816 continue;
1817
1818 cpr = &bnapi->cp_ring;
1819 ring = &cpr->cp_ring_struct;
1820
1821 bnxt_free_ring(bp, ring);
1822 }
1823}
1824
1825static int bnxt_alloc_cp_rings(struct bnxt *bp)
1826{
1827 int i, rc;
1828
1829 for (i = 0; i < bp->cp_nr_rings; i++) {
1830 struct bnxt_napi *bnapi = bp->bnapi[i];
1831 struct bnxt_cp_ring_info *cpr;
1832 struct bnxt_ring_struct *ring;
1833
1834 if (!bnapi)
1835 continue;
1836
1837 cpr = &bnapi->cp_ring;
1838 ring = &cpr->cp_ring_struct;
1839
1840 rc = bnxt_alloc_ring(bp, ring);
1841 if (rc)
1842 return rc;
1843 }
1844 return 0;
1845}
1846
1847static void bnxt_init_ring_struct(struct bnxt *bp)
1848{
1849 int i;
1850
1851 for (i = 0; i < bp->cp_nr_rings; i++) {
1852 struct bnxt_napi *bnapi = bp->bnapi[i];
1853 struct bnxt_cp_ring_info *cpr;
1854 struct bnxt_rx_ring_info *rxr;
1855 struct bnxt_tx_ring_info *txr;
1856 struct bnxt_ring_struct *ring;
1857
1858 if (!bnapi)
1859 continue;
1860
1861 cpr = &bnapi->cp_ring;
1862 ring = &cpr->cp_ring_struct;
1863 ring->nr_pages = bp->cp_nr_pages;
1864 ring->page_size = HW_CMPD_RING_SIZE;
1865 ring->pg_arr = (void **)cpr->cp_desc_ring;
1866 ring->dma_arr = cpr->cp_desc_mapping;
1867 ring->vmem_size = 0;
1868
Michael Chanb6ab4b02016-01-02 23:44:59 -05001869 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05001870 if (!rxr)
1871 goto skip_rx;
1872
Michael Chanc0c050c2015-10-22 16:01:17 -04001873 ring = &rxr->rx_ring_struct;
1874 ring->nr_pages = bp->rx_nr_pages;
1875 ring->page_size = HW_RXBD_RING_SIZE;
1876 ring->pg_arr = (void **)rxr->rx_desc_ring;
1877 ring->dma_arr = rxr->rx_desc_mapping;
1878 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1879 ring->vmem = (void **)&rxr->rx_buf_ring;
1880
1881 ring = &rxr->rx_agg_ring_struct;
1882 ring->nr_pages = bp->rx_agg_nr_pages;
1883 ring->page_size = HW_RXBD_RING_SIZE;
1884 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1885 ring->dma_arr = rxr->rx_agg_desc_mapping;
1886 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1887 ring->vmem = (void **)&rxr->rx_agg_ring;
1888
Michael Chan3b2b7d92016-01-02 23:45:00 -05001889skip_rx:
Michael Chanb6ab4b02016-01-02 23:44:59 -05001890 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05001891 if (!txr)
1892 continue;
1893
Michael Chanc0c050c2015-10-22 16:01:17 -04001894 ring = &txr->tx_ring_struct;
1895 ring->nr_pages = bp->tx_nr_pages;
1896 ring->page_size = HW_RXBD_RING_SIZE;
1897 ring->pg_arr = (void **)txr->tx_desc_ring;
1898 ring->dma_arr = txr->tx_desc_mapping;
1899 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
1900 ring->vmem = (void **)&txr->tx_buf_ring;
1901 }
1902}
1903
1904static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
1905{
1906 int i;
1907 u32 prod;
1908 struct rx_bd **rx_buf_ring;
1909
1910 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
1911 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
1912 int j;
1913 struct rx_bd *rxbd;
1914
1915 rxbd = rx_buf_ring[i];
1916 if (!rxbd)
1917 continue;
1918
1919 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1920 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1921 rxbd->rx_bd_opaque = prod;
1922 }
1923 }
1924}
1925
1926static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1927{
1928 struct net_device *dev = bp->dev;
Michael Chanc0c050c2015-10-22 16:01:17 -04001929 struct bnxt_rx_ring_info *rxr;
1930 struct bnxt_ring_struct *ring;
1931 u32 prod, type;
1932 int i;
1933
1934	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1935 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1936
1937 if (NET_IP_ALIGN == 2)
1938 type |= RX_BD_FLAGS_SOP;
1939
1940	rxr = &bp->rx_ring[ring_nr];
1941	ring = &rxr->rx_ring_struct;
1942 bnxt_init_rxbd_pages(ring, type);
1943
1944 prod = rxr->rx_prod;
1945 for (i = 0; i < bp->rx_ring_size; i++) {
1946 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
1947 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
1948 ring_nr, i, bp->rx_ring_size);
1949 break;
1950 }
1951 prod = NEXT_RX(prod);
1952 }
1953 rxr->rx_prod = prod;
1954 ring->fw_ring_id = INVALID_HW_RING_ID;
1955
1956	ring = &rxr->rx_agg_ring_struct;
1957 ring->fw_ring_id = INVALID_HW_RING_ID;
1958
1959	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1960 return 0;
1961
1962	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
1963 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1964
1965 bnxt_init_rxbd_pages(ring, type);
1966
1967 prod = rxr->rx_agg_prod;
1968 for (i = 0; i < bp->rx_agg_ring_size; i++) {
1969 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
1970			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
1971				    ring_nr, i, bp->rx_agg_ring_size);
1972 break;
1973 }
1974 prod = NEXT_RX_AGG(prod);
1975 }
1976 rxr->rx_agg_prod = prod;
1977
1978 if (bp->flags & BNXT_FLAG_TPA) {
1979 if (rxr->rx_tpa) {
1980 u8 *data;
1981 dma_addr_t mapping;
1982
1983 for (i = 0; i < MAX_TPA; i++) {
1984 data = __bnxt_alloc_rx_data(bp, &mapping,
1985 GFP_KERNEL);
1986 if (!data)
1987 return -ENOMEM;
1988
1989 rxr->rx_tpa[i].data = data;
1990 rxr->rx_tpa[i].mapping = mapping;
1991 }
1992 } else {
1993 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
1994 return -ENOMEM;
1995 }
1996 }
1997
1998 return 0;
1999}
2000
2001static int bnxt_init_rx_rings(struct bnxt *bp)
2002{
2003 int i, rc = 0;
2004
2005 for (i = 0; i < bp->rx_nr_rings; i++) {
2006 rc = bnxt_init_one_rx_ring(bp, i);
2007 if (rc)
2008 break;
2009 }
2010
2011 return rc;
2012}
2013
2014static int bnxt_init_tx_rings(struct bnxt *bp)
2015{
2016 u16 i;
2017
2018 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2019 MAX_SKB_FRAGS + 1);
2020
2021 for (i = 0; i < bp->tx_nr_rings; i++) {
2022		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2023		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2024
2025 ring->fw_ring_id = INVALID_HW_RING_ID;
2026 }
2027
2028 return 0;
2029}
2030
2031static void bnxt_free_ring_grps(struct bnxt *bp)
2032{
2033 kfree(bp->grp_info);
2034 bp->grp_info = NULL;
2035}
2036
2037static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2038{
2039 int i;
2040
2041 if (irq_re_init) {
2042 bp->grp_info = kcalloc(bp->cp_nr_rings,
2043 sizeof(struct bnxt_ring_grp_info),
2044 GFP_KERNEL);
2045 if (!bp->grp_info)
2046 return -ENOMEM;
2047 }
2048 for (i = 0; i < bp->cp_nr_rings; i++) {
2049 if (irq_re_init)
2050 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2051 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2052 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2053 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2054 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2055 }
2056 return 0;
2057}
2058
2059static void bnxt_free_vnics(struct bnxt *bp)
2060{
2061 kfree(bp->vnic_info);
2062 bp->vnic_info = NULL;
2063 bp->nr_vnics = 0;
2064}
2065
2066static int bnxt_alloc_vnics(struct bnxt *bp)
2067{
2068 int num_vnics = 1;
2069
2070#ifdef CONFIG_RFS_ACCEL
2071 if (bp->flags & BNXT_FLAG_RFS)
2072 num_vnics += bp->rx_nr_rings;
2073#endif
2074
2075 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2076 GFP_KERNEL);
2077 if (!bp->vnic_info)
2078 return -ENOMEM;
2079
2080 bp->nr_vnics = num_vnics;
2081 return 0;
2082}
2083
2084static void bnxt_init_vnics(struct bnxt *bp)
2085{
2086 int i;
2087
2088 for (i = 0; i < bp->nr_vnics; i++) {
2089 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2090
2091 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2092 vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2093 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2094
2095 if (bp->vnic_info[i].rss_hash_key) {
2096 if (i == 0)
2097 prandom_bytes(vnic->rss_hash_key,
2098 HW_HASH_KEY_SIZE);
2099 else
2100 memcpy(vnic->rss_hash_key,
2101 bp->vnic_info[0].rss_hash_key,
2102 HW_HASH_KEY_SIZE);
2103 }
2104 }
2105}
2106
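/* Return the number of descriptor pages to allocate for a ring of
 * ring_size entries: ring_size / desc_per_pg plus one, rounded up to the
 * next power of two (minimum one page).
 */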
2107static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2108{
2109 int pages;
2110
2111 pages = ring_size / desc_per_pg;
2112
2113 if (!pages)
2114 return 1;
2115
2116 pages++;
2117
2118 while (pages & (pages - 1))
2119 pages++;
2120
2121 return pages;
2122}
2123
2124static void bnxt_set_tpa_flags(struct bnxt *bp)
2125{
2126 bp->flags &= ~BNXT_FLAG_TPA;
2127 if (bp->dev->features & NETIF_F_LRO)
2128 bp->flags |= BNXT_FLAG_LRO;
2129 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2130 bp->flags |= BNXT_FLAG_GRO;
2131}
2132
2133/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2134 * be set on entry.
2135 */
2136void bnxt_set_ring_params(struct bnxt *bp)
2137{
2138 u32 ring_size, rx_size, rx_space;
2139 u32 agg_factor = 0, agg_ring_size = 0;
2140
2141 /* 8 for CRC and VLAN */
2142 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2143
2144 rx_space = rx_size + NET_SKB_PAD +
2145 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2146
2147 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2148 ring_size = bp->rx_ring_size;
2149 bp->rx_agg_ring_size = 0;
2150 bp->rx_agg_nr_pages = 0;
2151
2152 if (bp->flags & BNXT_FLAG_TPA)
2153 agg_factor = 4;
2154
2155 bp->flags &= ~BNXT_FLAG_JUMBO;
2156 if (rx_space > PAGE_SIZE) {
2157 u32 jumbo_factor;
2158
2159 bp->flags |= BNXT_FLAG_JUMBO;
2160 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2161 if (jumbo_factor > agg_factor)
2162 agg_factor = jumbo_factor;
2163 }
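	/* Worked example (assuming 4K pages): with a 9000-byte MTU, rx_space
	 * exceeds PAGE_SIZE, so jumbo_factor = PAGE_ALIGN(9000 - 40) >>
	 * PAGE_SHIFT = 3; with TPA enabled agg_factor remains 4, so the
	 * aggregation ring below is sized at four times the RX ring.
	 */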
2164 agg_ring_size = ring_size * agg_factor;
2165
2166 if (agg_ring_size) {
2167 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2168 RX_DESC_CNT);
2169 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2170 u32 tmp = agg_ring_size;
2171
2172 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2173 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2174 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2175 tmp, agg_ring_size);
2176 }
2177 bp->rx_agg_ring_size = agg_ring_size;
2178 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2179 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2180 rx_space = rx_size + NET_SKB_PAD +
2181 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2182 }
2183
2184 bp->rx_buf_use_size = rx_size;
2185 bp->rx_buf_size = rx_space;
2186
2187 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2188 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2189
2190 ring_size = bp->tx_ring_size;
2191 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2192 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2193
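	/* The completion ring is sized to hold an entry for every possible
	 * RX, aggregation and TX completion; the factor of 2 for RX matches
	 * the "each rx completion (2 records)" note in bnxt_hwrm_set_coal().
	 */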
2194 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2195 bp->cp_ring_size = ring_size;
2196
2197 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2198 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2199 bp->cp_nr_pages = MAX_CP_PAGES;
2200 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2201 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2202 ring_size, bp->cp_ring_size);
2203 }
2204 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2205 bp->cp_ring_mask = bp->cp_bit - 1;
2206}
2207
2208static void bnxt_free_vnic_attributes(struct bnxt *bp)
2209{
2210 int i;
2211 struct bnxt_vnic_info *vnic;
2212 struct pci_dev *pdev = bp->pdev;
2213
2214 if (!bp->vnic_info)
2215 return;
2216
2217 for (i = 0; i < bp->nr_vnics; i++) {
2218 vnic = &bp->vnic_info[i];
2219
2220 kfree(vnic->fw_grp_ids);
2221 vnic->fw_grp_ids = NULL;
2222
2223 kfree(vnic->uc_list);
2224 vnic->uc_list = NULL;
2225
2226 if (vnic->mc_list) {
2227 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2228 vnic->mc_list, vnic->mc_list_mapping);
2229 vnic->mc_list = NULL;
2230 }
2231
2232 if (vnic->rss_table) {
2233 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2234 vnic->rss_table,
2235 vnic->rss_table_dma_addr);
2236 vnic->rss_table = NULL;
2237 }
2238
2239 vnic->rss_hash_key = NULL;
2240 vnic->flags = 0;
2241 }
2242}
2243
2244static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2245{
2246 int i, rc = 0, size;
2247 struct bnxt_vnic_info *vnic;
2248 struct pci_dev *pdev = bp->pdev;
2249 int max_rings;
2250
2251 for (i = 0; i < bp->nr_vnics; i++) {
2252 vnic = &bp->vnic_info[i];
2253
2254 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2255 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2256
2257 if (mem_size > 0) {
2258 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2259 if (!vnic->uc_list) {
2260 rc = -ENOMEM;
2261 goto out;
2262 }
2263 }
2264 }
2265
2266 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2267 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2268 vnic->mc_list =
2269 dma_alloc_coherent(&pdev->dev,
2270 vnic->mc_list_size,
2271 &vnic->mc_list_mapping,
2272 GFP_KERNEL);
2273 if (!vnic->mc_list) {
2274 rc = -ENOMEM;
2275 goto out;
2276 }
2277 }
2278
2279 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2280 max_rings = bp->rx_nr_rings;
2281 else
2282 max_rings = 1;
2283
2284 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2285 if (!vnic->fw_grp_ids) {
2286 rc = -ENOMEM;
2287 goto out;
2288 }
2289
2290 /* Allocate rss table and hash key */
2291 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2292 &vnic->rss_table_dma_addr,
2293 GFP_KERNEL);
2294 if (!vnic->rss_table) {
2295 rc = -ENOMEM;
2296 goto out;
2297 }
2298
2299 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2300
2301 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2302 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2303 }
2304 return 0;
2305
2306out:
2307 return rc;
2308}
2309
2310static void bnxt_free_hwrm_resources(struct bnxt *bp)
2311{
2312 struct pci_dev *pdev = bp->pdev;
2313
2314 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2315 bp->hwrm_cmd_resp_dma_addr);
2316
2317 bp->hwrm_cmd_resp_addr = NULL;
2318 if (bp->hwrm_dbg_resp_addr) {
2319 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2320 bp->hwrm_dbg_resp_addr,
2321 bp->hwrm_dbg_resp_dma_addr);
2322
2323 bp->hwrm_dbg_resp_addr = NULL;
2324 }
2325}
2326
2327static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2328{
2329 struct pci_dev *pdev = bp->pdev;
2330
2331 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2332 &bp->hwrm_cmd_resp_dma_addr,
2333 GFP_KERNEL);
2334 if (!bp->hwrm_cmd_resp_addr)
2335 return -ENOMEM;
2336 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2337 HWRM_DBG_REG_BUF_SIZE,
2338 &bp->hwrm_dbg_resp_dma_addr,
2339 GFP_KERNEL);
2340 if (!bp->hwrm_dbg_resp_addr)
2341 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2342
2343 return 0;
2344}
2345
2346static void bnxt_free_stats(struct bnxt *bp)
2347{
2348 u32 size, i;
2349 struct pci_dev *pdev = bp->pdev;
2350
2351 if (!bp->bnapi)
2352 return;
2353
2354 size = sizeof(struct ctx_hw_stats);
2355
2356 for (i = 0; i < bp->cp_nr_rings; i++) {
2357 struct bnxt_napi *bnapi = bp->bnapi[i];
2358 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2359
2360 if (cpr->hw_stats) {
2361 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2362 cpr->hw_stats_map);
2363 cpr->hw_stats = NULL;
2364 }
2365 }
2366}
2367
2368static int bnxt_alloc_stats(struct bnxt *bp)
2369{
2370 u32 size, i;
2371 struct pci_dev *pdev = bp->pdev;
2372
2373 size = sizeof(struct ctx_hw_stats);
2374
2375 for (i = 0; i < bp->cp_nr_rings; i++) {
2376 struct bnxt_napi *bnapi = bp->bnapi[i];
2377 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2378
2379 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2380 &cpr->hw_stats_map,
2381 GFP_KERNEL);
2382 if (!cpr->hw_stats)
2383 return -ENOMEM;
2384
2385 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2386 }
2387 return 0;
2388}
2389
2390static void bnxt_clear_ring_indices(struct bnxt *bp)
2391{
2392 int i;
2393
2394 if (!bp->bnapi)
2395 return;
2396
2397 for (i = 0; i < bp->cp_nr_rings; i++) {
2398 struct bnxt_napi *bnapi = bp->bnapi[i];
2399 struct bnxt_cp_ring_info *cpr;
2400 struct bnxt_rx_ring_info *rxr;
2401 struct bnxt_tx_ring_info *txr;
2402
2403 if (!bnapi)
2404 continue;
2405
2406 cpr = &bnapi->cp_ring;
2407 cpr->cp_raw_cons = 0;
2408
2409		txr = bnapi->tx_ring;
2410		if (txr) {
2411			txr->tx_prod = 0;
2412			txr->tx_cons = 0;
2413		}
2414
2415		rxr = bnapi->rx_ring;
2416		if (rxr) {
2417			rxr->rx_prod = 0;
2418			rxr->rx_agg_prod = 0;
2419			rxr->rx_sw_agg_prod = 0;
2420		}
2421	}
2422}
2423
2424static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2425{
2426#ifdef CONFIG_RFS_ACCEL
2427 int i;
2428
2429	/* We are under rtnl_lock and all our NAPIs have been disabled, so
2430	 * it is safe to delete the hash table.
2431	 */
2432 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2433 struct hlist_head *head;
2434 struct hlist_node *tmp;
2435 struct bnxt_ntuple_filter *fltr;
2436
2437 head = &bp->ntp_fltr_hash_tbl[i];
2438 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2439 hlist_del(&fltr->hash);
2440 kfree(fltr);
2441 }
2442 }
2443 if (irq_reinit) {
2444 kfree(bp->ntp_fltr_bmap);
2445 bp->ntp_fltr_bmap = NULL;
2446 }
2447 bp->ntp_fltr_count = 0;
2448#endif
2449}
2450
2451static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2452{
2453#ifdef CONFIG_RFS_ACCEL
2454 int i, rc = 0;
2455
2456 if (!(bp->flags & BNXT_FLAG_RFS))
2457 return 0;
2458
2459 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2460 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2461
2462 bp->ntp_fltr_count = 0;
2463	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2464				    sizeof(long), GFP_KERNEL);
2465
2466 if (!bp->ntp_fltr_bmap)
2467 rc = -ENOMEM;
2468
2469 return rc;
2470#else
2471 return 0;
2472#endif
2473}
2474
2475static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2476{
2477 bnxt_free_vnic_attributes(bp);
2478 bnxt_free_tx_rings(bp);
2479 bnxt_free_rx_rings(bp);
2480 bnxt_free_cp_rings(bp);
2481 bnxt_free_ntp_fltrs(bp, irq_re_init);
2482 if (irq_re_init) {
2483 bnxt_free_stats(bp);
2484 bnxt_free_ring_grps(bp);
2485 bnxt_free_vnics(bp);
2486		kfree(bp->tx_ring);
2487		bp->tx_ring = NULL;
2488		kfree(bp->rx_ring);
2489		bp->rx_ring = NULL;
2490		kfree(bp->bnapi);
2491 bp->bnapi = NULL;
2492 } else {
2493 bnxt_clear_ring_indices(bp);
2494 }
2495}
2496
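/* Allocate (or, when irq_re_init is false, just re-initialize) all host
 * memory for the device.  The bnxt_napi structs are carved out of a single
 * allocation: an L1-cache-aligned pointer array followed by the per-ring
 * structs, which is why the local bnapi pointer below is advanced first by
 * arr_size and then by size for each ring.
 */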
2497static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2498{
2499 int i, rc, size, arr_size;
2500 void *bnapi;
2501
2502 if (irq_re_init) {
2503 /* Allocate bnapi mem pointer array and mem block for
2504 * all queues
2505 */
2506 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2507 bp->cp_nr_rings);
2508 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2509 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2510 if (!bnapi)
2511 return -ENOMEM;
2512
2513 bp->bnapi = bnapi;
2514 bnapi += arr_size;
2515 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2516 bp->bnapi[i] = bnapi;
2517 bp->bnapi[i]->index = i;
2518 bp->bnapi[i]->bp = bp;
2519 }
2520
2521		bp->rx_ring = kcalloc(bp->rx_nr_rings,
2522 sizeof(struct bnxt_rx_ring_info),
2523 GFP_KERNEL);
2524 if (!bp->rx_ring)
2525 return -ENOMEM;
2526
2527 for (i = 0; i < bp->rx_nr_rings; i++) {
2528 bp->rx_ring[i].bnapi = bp->bnapi[i];
2529 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2530 }
2531
2532 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2533 sizeof(struct bnxt_tx_ring_info),
2534 GFP_KERNEL);
2535 if (!bp->tx_ring)
2536 return -ENOMEM;
2537
2538 for (i = 0; i < bp->tx_nr_rings; i++) {
2539 bp->tx_ring[i].bnapi = bp->bnapi[i];
2540 bp->bnapi[i]->tx_ring = &bp->tx_ring[i];
2541 }
2542
2543		rc = bnxt_alloc_stats(bp);
2544 if (rc)
2545 goto alloc_mem_err;
2546
2547 rc = bnxt_alloc_ntp_fltrs(bp);
2548 if (rc)
2549 goto alloc_mem_err;
2550
2551 rc = bnxt_alloc_vnics(bp);
2552 if (rc)
2553 goto alloc_mem_err;
2554 }
2555
2556 bnxt_init_ring_struct(bp);
2557
2558 rc = bnxt_alloc_rx_rings(bp);
2559 if (rc)
2560 goto alloc_mem_err;
2561
2562 rc = bnxt_alloc_tx_rings(bp);
2563 if (rc)
2564 goto alloc_mem_err;
2565
2566 rc = bnxt_alloc_cp_rings(bp);
2567 if (rc)
2568 goto alloc_mem_err;
2569
2570 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2571 BNXT_VNIC_UCAST_FLAG;
2572 rc = bnxt_alloc_vnic_attributes(bp);
2573 if (rc)
2574 goto alloc_mem_err;
2575 return 0;
2576
2577alloc_mem_err:
2578 bnxt_free_mem(bp, true);
2579 return rc;
2580}
2581
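/* HWRM command helpers.  A typical caller (this sketch mirrors
 * bnxt_hwrm_func_reset() later in this file) builds the request on the
 * stack, initializes the common header and then sends it:
 *
 *	struct hwrm_func_reset_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * hwrm_send_message() serializes on bp->hwrm_cmd_lock; callers that must
 * read the response out of bp->hwrm_cmd_resp_addr hold the lock themselves
 * around _hwrm_send_message() and the response access.
 */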
2582void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2583 u16 cmpl_ring, u16 target_id)
2584{
2585 struct hwrm_cmd_req_hdr *req = request;
2586
2587 req->cmpl_ring_req_type =
2588 cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
2589 req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
2590 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2591}
2592
2593int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2594{
2595 int i, intr_process, rc;
2596 struct hwrm_cmd_req_hdr *req = msg;
2597 u32 *data = msg;
2598 __le32 *resp_len, *valid;
2599 u16 cp_ring_id, len = 0;
2600 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2601
2602 req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
2603 memset(resp, 0, PAGE_SIZE);
2604 cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
2605 HWRM_CMPL_RING_MASK) >>
2606 HWRM_CMPL_RING_SFT;
2607 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2608
2609 /* Write request msg to hwrm channel */
2610 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2611
2612 /* currently supports only one outstanding message */
2613 if (intr_process)
2614 bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
2615 HWRM_SEQ_ID_MASK;
2616
2617 /* Ring channel doorbell */
2618 writel(1, bp->bar0 + 0x100);
2619
2620 i = 0;
2621 if (intr_process) {
2622 /* Wait until hwrm response cmpl interrupt is processed */
2623 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2624 i++ < timeout) {
2625 usleep_range(600, 800);
2626 }
2627
2628 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2629 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2630 req->cmpl_ring_req_type);
2631 return -1;
2632 }
2633 } else {
2634 /* Check if response len is updated */
2635 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2636 for (i = 0; i < timeout; i++) {
2637 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2638 HWRM_RESP_LEN_SFT;
2639 if (len)
2640 break;
2641 usleep_range(600, 800);
2642 }
2643
2644 if (i >= timeout) {
2645 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2646 timeout, req->cmpl_ring_req_type,
2647 req->target_id_seq_id, *resp_len);
2648 return -1;
2649 }
2650
2651 /* Last word of resp contains valid bit */
2652 valid = bp->hwrm_cmd_resp_addr + len - 4;
2653 for (i = 0; i < timeout; i++) {
2654 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2655 break;
2656 usleep_range(600, 800);
2657 }
2658
2659 if (i >= timeout) {
2660 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2661 timeout, req->cmpl_ring_req_type,
2662 req->target_id_seq_id, len, *valid);
2663 return -1;
2664 }
2665 }
2666
2667 rc = le16_to_cpu(resp->error_code);
2668 if (rc) {
2669 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2670 le16_to_cpu(resp->req_type),
2671 le16_to_cpu(resp->seq_id), rc);
2672 return rc;
2673 }
2674 return 0;
2675}
2676
2677int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2678{
2679 int rc;
2680
2681 mutex_lock(&bp->hwrm_cmd_lock);
2682 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2683 mutex_unlock(&bp->hwrm_cmd_lock);
2684 return rc;
2685}
2686
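/* Register the driver with the firmware: advertise the OS type, driver
 * version and async event forwarding.  On the PF, a 256-bit bitmap of VF
 * request types to forward to the PF driver is built from bnxt_vf_req_snif[]
 * and copied into the request as eight 32-bit words.
 */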
2687static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2688{
2689 struct hwrm_func_drv_rgtr_input req = {0};
2690 int i;
2691
2692 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2693
2694 req.enables =
2695 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2696 FUNC_DRV_RGTR_REQ_ENABLES_VER |
2697 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2698
2699 /* TODO: current async event fwd bits are not defined and the firmware
2700 * only checks if it is non-zero to enable async event forwarding
2701 */
2702 req.async_event_fwd[0] |= cpu_to_le32(1);
2703 req.os_type = cpu_to_le16(1);
2704 req.ver_maj = DRV_VER_MAJ;
2705 req.ver_min = DRV_VER_MIN;
2706 req.ver_upd = DRV_VER_UPD;
2707
2708 if (BNXT_PF(bp)) {
2709		DECLARE_BITMAP(vf_req_snif_bmap, 256);
2710		u32 *data = (u32 *)vf_req_snif_bmap;
2711
2712		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2713		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2714			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2715
2716		for (i = 0; i < 8; i++)
2717			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2718
2719		req.enables |=
2720 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2721 }
2722
2723 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2724}
2725
2726static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2727{
2728 struct hwrm_func_drv_unrgtr_input req = {0};
2729
2730 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2731 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2732}
2733
2734static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2735{
2736 u32 rc = 0;
2737 struct hwrm_tunnel_dst_port_free_input req = {0};
2738
2739 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2740 req.tunnel_type = tunnel_type;
2741
2742 switch (tunnel_type) {
2743 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2744 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2745 break;
2746 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2747 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2748 break;
2749 default:
2750 break;
2751 }
2752
2753 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2754 if (rc)
2755 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2756 rc);
2757 return rc;
2758}
2759
2760static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2761 u8 tunnel_type)
2762{
2763 u32 rc = 0;
2764 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2765 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2766
2767 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2768
2769 req.tunnel_type = tunnel_type;
2770 req.tunnel_dst_port_val = port;
2771
2772 mutex_lock(&bp->hwrm_cmd_lock);
2773 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2774 if (rc) {
2775 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2776 rc);
2777 goto err_out;
2778 }
2779
2780 if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2781 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2782
2783 else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2784 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2785err_out:
2786 mutex_unlock(&bp->hwrm_cmd_lock);
2787 return rc;
2788}
2789
2790static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2791{
2792 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2793 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2794
2795 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2796	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2797
2798 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2799 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2800 req.mask = cpu_to_le32(vnic->rx_mask);
2801 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2802}
2803
2804#ifdef CONFIG_RFS_ACCEL
2805static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2806 struct bnxt_ntuple_filter *fltr)
2807{
2808 struct hwrm_cfa_ntuple_filter_free_input req = {0};
2809
2810 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2811 req.ntuple_filter_id = fltr->filter_id;
2812 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2813}
2814
2815#define BNXT_NTP_FLTR_FLAGS \
2816 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
2817 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
2818 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
2819 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
2820 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
2821 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
2822 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
2823 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
2824 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
2825 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
2826 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
2827 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
2828 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
2829	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
2830
2831static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2832 struct bnxt_ntuple_filter *fltr)
2833{
2834 int rc = 0;
2835 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2836 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2837 bp->hwrm_cmd_resp_addr;
2838 struct flow_keys *keys = &fltr->fkeys;
2839 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2840
2841 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2842 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2843
2844 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2845
2846 req.ethertype = htons(ETH_P_IP);
2847 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
2848	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
2849	req.ip_protocol = keys->basic.ip_proto;
2850
2851 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
2852 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2853 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
2854 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2855
2856 req.src_port = keys->ports.src;
2857 req.src_port_mask = cpu_to_be16(0xffff);
2858 req.dst_port = keys->ports.dst;
2859 req.dst_port_mask = cpu_to_be16(0xffff);
2860
2861	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
2862	mutex_lock(&bp->hwrm_cmd_lock);
2863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2864 if (!rc)
2865 fltr->filter_id = resp->ntuple_filter_id;
2866 mutex_unlock(&bp->hwrm_cmd_lock);
2867 return rc;
2868}
2869#endif
2870
2871static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
2872 u8 *mac_addr)
2873{
2874 u32 rc = 0;
2875 struct hwrm_cfa_l2_filter_alloc_input req = {0};
2876 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2877
2878 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
2879 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
2880 CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
2881	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
2882	req.enables =
2883		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
2884			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
2885			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
2886 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
2887 req.l2_addr_mask[0] = 0xff;
2888 req.l2_addr_mask[1] = 0xff;
2889 req.l2_addr_mask[2] = 0xff;
2890 req.l2_addr_mask[3] = 0xff;
2891 req.l2_addr_mask[4] = 0xff;
2892 req.l2_addr_mask[5] = 0xff;
2893
2894 mutex_lock(&bp->hwrm_cmd_lock);
2895 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2896 if (!rc)
2897 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
2898 resp->l2_filter_id;
2899 mutex_unlock(&bp->hwrm_cmd_lock);
2900 return rc;
2901}
2902
2903static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
2904{
2905 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
2906 int rc = 0;
2907
2908 /* Any associated ntuple filters will also be cleared by firmware. */
2909 mutex_lock(&bp->hwrm_cmd_lock);
2910 for (i = 0; i < num_of_vnics; i++) {
2911 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2912
2913 for (j = 0; j < vnic->uc_filter_count; j++) {
2914 struct hwrm_cfa_l2_filter_free_input req = {0};
2915
2916 bnxt_hwrm_cmd_hdr_init(bp, &req,
2917 HWRM_CFA_L2_FILTER_FREE, -1, -1);
2918
2919 req.l2_filter_id = vnic->fw_l2_filter_id[j];
2920
2921 rc = _hwrm_send_message(bp, &req, sizeof(req),
2922 HWRM_CMD_TIMEOUT);
2923 }
2924 vnic->uc_filter_count = 0;
2925 }
2926 mutex_unlock(&bp->hwrm_cmd_lock);
2927
2928 return rc;
2929}
2930
2931static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2932{
2933 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2934 struct hwrm_vnic_tpa_cfg_input req = {0};
2935
2936 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2937
2938 if (tpa_flags) {
2939 u16 mss = bp->dev->mtu - 40;
2940 u32 nsegs, n, segs = 0, flags;
2941
2942 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
2943 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
2944 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
2945 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
2946 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2947 if (tpa_flags & BNXT_FLAG_GRO)
2948 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
2949
2950 req.flags = cpu_to_le32(flags);
2951
2952 req.enables =
2953 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
2954				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
2955				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
2956
2957		/* The number of aggregation segments is in log2 units, and
2958		 * the first packet is not included in that count.
2959		 */
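		/* Example (assuming 4K pages and MAX_SKB_FRAGS of 17) with an
		 * MSS of 1460: n = 4096 / 1460 = 2 segments per page, nsegs =
		 * (17 - 1) * 2 = 32, so max_agg_segs = ilog2(32) = 5.
		 */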
2960 if (mss <= PAGE_SIZE) {
2961 n = PAGE_SIZE / mss;
2962 nsegs = (MAX_SKB_FRAGS - 1) * n;
2963 } else {
2964 n = mss / PAGE_SIZE;
2965 if (mss & (PAGE_SIZE - 1))
2966 n++;
2967 nsegs = (MAX_SKB_FRAGS - n) / n;
2968 }
2969
2970 segs = ilog2(nsegs);
2971 req.max_agg_segs = cpu_to_le16(segs);
2972 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
2973
2974		req.min_agg_len = cpu_to_le32(512);
2975	}
2976 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2977
2978 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2979}
2980
2981static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
2982{
2983 u32 i, j, max_rings;
2984 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2985 struct hwrm_vnic_rss_cfg_input req = {0};
2986
2987 if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
2988 return 0;
2989
2990 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
2991 if (set_rss) {
2992 vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
2993 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
2994 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
2995 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
2996
2997 req.hash_type = cpu_to_le32(vnic->hash_type);
2998
2999 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3000 max_rings = bp->rx_nr_rings;
3001 else
3002 max_rings = 1;
3003
3004 /* Fill the RSS indirection table with ring group ids */
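		/* e.g. with four RX rings the table repeats fw_grp_ids[0..3]
		 * until all HW_HASH_INDEX_SIZE entries are filled.
		 */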
3005 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3006 if (j == max_rings)
3007 j = 0;
3008 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3009 }
3010
3011 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3012 req.hash_key_tbl_addr =
3013 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3014 }
3015 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3016 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3017}
3018
3019static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3020{
3021 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3022 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3023
3024 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3025 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3026 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3027 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3028 req.enables =
3029 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3030 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3031 /* thresholds not implemented in firmware yet */
3032 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3033 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3034 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3035 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3036}
3037
3038static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3039{
3040 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3041
3042 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3043 req.rss_cos_lb_ctx_id =
3044 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3045
3046 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3047 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3048}
3049
3050static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3051{
3052 int i;
3053
3054 for (i = 0; i < bp->nr_vnics; i++) {
3055 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3056
3057 if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3058 bnxt_hwrm_vnic_ctx_free_one(bp, i);
3059 }
3060 bp->rsscos_nr_ctxs = 0;
3061}
3062
3063static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3064{
3065 int rc;
3066 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3067 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3068 bp->hwrm_cmd_resp_addr;
3069
3070 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3071 -1);
3072
3073 mutex_lock(&bp->hwrm_cmd_lock);
3074 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3075 if (!rc)
3076 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3077 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3078 mutex_unlock(&bp->hwrm_cmd_lock);
3079
3080 return rc;
3081}
3082
3083static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3084{
3085 int grp_idx = 0;
3086 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3087 struct hwrm_vnic_cfg_input req = {0};
3088
3089 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3090	/* Only RSS is supported for now; TBD: COS & LB */
3091 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3092 VNIC_CFG_REQ_ENABLES_RSS_RULE);
3093 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3094 req.cos_rule = cpu_to_le16(0xffff);
3095 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3096 grp_idx = 0;
3097 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3098 grp_idx = vnic_id - 1;
3099
3100 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3101 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3102
3103 req.lb_rule = cpu_to_le16(0xffff);
3104 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3105 VLAN_HLEN);
3106
3107 if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3108 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3109
3110 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3111}
3112
3113static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3114{
3115 u32 rc = 0;
3116
3117 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3118 struct hwrm_vnic_free_input req = {0};
3119
3120 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3121 req.vnic_id =
3122 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3123
3124 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3125 if (rc)
3126 return rc;
3127 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3128 }
3129 return rc;
3130}
3131
3132static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3133{
3134 u16 i;
3135
3136 for (i = 0; i < bp->nr_vnics; i++)
3137 bnxt_hwrm_vnic_free_one(bp, i);
3138}
3139
3140static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
3141 u16 end_grp_id)
3142{
3143 u32 rc = 0, i, j;
3144 struct hwrm_vnic_alloc_input req = {0};
3145 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3146
3147 /* map ring groups to this vnic */
3148 for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
3149 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
3150 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3151 j, (end_grp_id - start_grp_id));
3152 break;
3153 }
3154 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3155 bp->grp_info[i].fw_grp_id;
3156 }
3157
3158 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3159 if (vnic_id == 0)
3160 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3161
3162 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3163
3164 mutex_lock(&bp->hwrm_cmd_lock);
3165 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3166 if (!rc)
3167 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3168 mutex_unlock(&bp->hwrm_cmd_lock);
3169 return rc;
3170}
3171
3172static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3173{
3174 u16 i;
3175 u32 rc = 0;
3176
3177 mutex_lock(&bp->hwrm_cmd_lock);
3178 for (i = 0; i < bp->rx_nr_rings; i++) {
3179 struct hwrm_ring_grp_alloc_input req = {0};
3180 struct hwrm_ring_grp_alloc_output *resp =
3181 bp->hwrm_cmd_resp_addr;
3182
3183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3184
3185 req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3186 req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
3187 req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
3188 req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
3189
3190 rc = _hwrm_send_message(bp, &req, sizeof(req),
3191 HWRM_CMD_TIMEOUT);
3192 if (rc)
3193 break;
3194
3195 bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
3196 }
3197 mutex_unlock(&bp->hwrm_cmd_lock);
3198 return rc;
3199}
3200
3201static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3202{
3203 u16 i;
3204 u32 rc = 0;
3205 struct hwrm_ring_grp_free_input req = {0};
3206
3207 if (!bp->grp_info)
3208 return 0;
3209
3210 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3211
3212 mutex_lock(&bp->hwrm_cmd_lock);
3213 for (i = 0; i < bp->cp_nr_rings; i++) {
3214 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3215 continue;
3216 req.ring_group_id =
3217 cpu_to_le32(bp->grp_info[i].fw_grp_id);
3218
3219 rc = _hwrm_send_message(bp, &req, sizeof(req),
3220 HWRM_CMD_TIMEOUT);
3221 if (rc)
3222 break;
3223 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3224 }
3225 mutex_unlock(&bp->hwrm_cmd_lock);
3226 return rc;
3227}
3228
3229static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3230 struct bnxt_ring_struct *ring,
3231 u32 ring_type, u32 map_index,
3232 u32 stats_ctx_id)
3233{
3234 int rc = 0, err = 0;
3235 struct hwrm_ring_alloc_input req = {0};
3236 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3237 u16 ring_id;
3238
3239 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3240
3241 req.enables = 0;
3242 if (ring->nr_pages > 1) {
3243 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3244 /* Page size is in log2 units */
3245 req.page_size = BNXT_PAGE_SHIFT;
3246 req.page_tbl_depth = 1;
3247 } else {
3248 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3249 }
3250 req.fbo = 0;
3251 /* Association of ring index with doorbell index and MSIX number */
3252 req.logical_id = cpu_to_le16(map_index);
3253
3254 switch (ring_type) {
3255 case HWRM_RING_ALLOC_TX:
3256 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3257 /* Association of transmit ring with completion ring */
3258 req.cmpl_ring_id =
3259 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3260 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3261 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3262 req.queue_id = cpu_to_le16(ring->queue_id);
3263 break;
3264 case HWRM_RING_ALLOC_RX:
3265 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3266 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3267 break;
3268 case HWRM_RING_ALLOC_AGG:
3269 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3270 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3271 break;
3272 case HWRM_RING_ALLOC_CMPL:
3273 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3274 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3275 if (bp->flags & BNXT_FLAG_USING_MSIX)
3276 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3277 break;
3278 default:
3279 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3280 ring_type);
3281 return -1;
3282 }
3283
3284 mutex_lock(&bp->hwrm_cmd_lock);
3285 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3286 err = le16_to_cpu(resp->error_code);
3287 ring_id = le16_to_cpu(resp->ring_id);
3288 mutex_unlock(&bp->hwrm_cmd_lock);
3289
3290 if (rc || err) {
3291 switch (ring_type) {
3292 case RING_FREE_REQ_RING_TYPE_CMPL:
3293 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3294 rc, err);
3295 return -1;
3296
3297 case RING_FREE_REQ_RING_TYPE_RX:
3298 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3299 rc, err);
3300 return -1;
3301
3302 case RING_FREE_REQ_RING_TYPE_TX:
3303 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3304 rc, err);
3305 return -1;
3306
3307 default:
3308 netdev_err(bp->dev, "Invalid ring\n");
3309 return -1;
3310 }
3311 }
3312 ring->fw_ring_id = ring_id;
3313 return rc;
3314}
3315
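/* Allocate all firmware rings: completion rings first (their IDs are passed
 * to the TX rings that depend on them), then TX, RX and, when aggregation
 * rings are enabled, the RX aggregation rings.  Each ring's doorbell lives
 * in BAR 1 at a 0x80-byte stride indexed by the ring's logical number.
 */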
3316static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3317{
3318 int i, rc = 0;
3319
3320	for (i = 0; i < bp->cp_nr_rings; i++) {
3321		struct bnxt_napi *bnapi = bp->bnapi[i];
3322		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3323		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3324
3325		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3326					      INVALID_STATS_CTX_ID);
3327		if (rc)
3328			goto err_out;
3329		cpr->cp_doorbell = bp->bar1 + i * 0x80;
3330		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3331		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3332	}
3333
3334	for (i = 0; i < bp->tx_nr_rings; i++) {
3335		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3336		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3337		u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
3338
3339		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, i,
3340					      fw_stats_ctx);
3341		if (rc)
3342			goto err_out;
3343		txr->tx_doorbell = bp->bar1 + i * 0x80;
3344	}
3345
3346	for (i = 0; i < bp->rx_nr_rings; i++) {
3347		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3348		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3349
3350		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, i,
3351					      INVALID_STATS_CTX_ID);
3352		if (rc)
3353			goto err_out;
3354		rxr->rx_doorbell = bp->bar1 + i * 0x80;
3355		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3356		bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
3357	}
3358
3359 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3360 for (i = 0; i < bp->rx_nr_rings; i++) {
3361			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3362			struct bnxt_ring_struct *ring =
3363 &rxr->rx_agg_ring_struct;
3364
3365 rc = hwrm_ring_alloc_send_msg(bp, ring,
3366 HWRM_RING_ALLOC_AGG,
3367 bp->rx_nr_rings + i,
3368 INVALID_STATS_CTX_ID);
3369 if (rc)
3370 goto err_out;
3371
3372 rxr->rx_agg_doorbell =
3373 bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
3374 writel(DB_KEY_RX | rxr->rx_agg_prod,
3375 rxr->rx_agg_doorbell);
3376 bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
3377 }
3378 }
3379err_out:
3380 return rc;
3381}
3382
3383static int hwrm_ring_free_send_msg(struct bnxt *bp,
3384 struct bnxt_ring_struct *ring,
3385 u32 ring_type, int cmpl_ring_id)
3386{
3387 int rc;
3388 struct hwrm_ring_free_input req = {0};
3389 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3390 u16 error_code;
3391
3392 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
3393 req.ring_type = ring_type;
3394 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3395
3396 mutex_lock(&bp->hwrm_cmd_lock);
3397 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3398 error_code = le16_to_cpu(resp->error_code);
3399 mutex_unlock(&bp->hwrm_cmd_lock);
3400
3401 if (rc || error_code) {
3402 switch (ring_type) {
3403 case RING_FREE_REQ_RING_TYPE_CMPL:
3404 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3405 rc);
3406 return rc;
3407 case RING_FREE_REQ_RING_TYPE_RX:
3408 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3409 rc);
3410 return rc;
3411 case RING_FREE_REQ_RING_TYPE_TX:
3412 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3413 rc);
3414 return rc;
3415 default:
3416 netdev_err(bp->dev, "Invalid ring\n");
3417 return -1;
3418 }
3419 }
3420 return 0;
3421}
3422
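/* Free all firmware rings in the reverse order of allocation: TX, RX, RX
 * aggregation and finally the completion rings.  When close_path is set,
 * the RING_FREE request carries the associated completion ring ID;
 * otherwise INVALID_HW_RING_ID is passed.
 */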
3423static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3424{
3425	int i;
3426
3427	if (!bp->bnapi)
3428		return;
3429
3430	for (i = 0; i < bp->tx_nr_rings; i++) {
3431		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3432		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3433		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3434
3435		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3436			hwrm_ring_free_send_msg(bp, ring,
3437						RING_FREE_REQ_RING_TYPE_TX,
3438						close_path ? cmpl_ring_id :
3439						INVALID_HW_RING_ID);
3440			ring->fw_ring_id = INVALID_HW_RING_ID;
3441		}
3442	}
3443
3444	for (i = 0; i < bp->rx_nr_rings; i++) {
3445		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3446		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3447		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3448
3449		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3450			hwrm_ring_free_send_msg(bp, ring,
3451						RING_FREE_REQ_RING_TYPE_RX,
3452						close_path ? cmpl_ring_id :
3453						INVALID_HW_RING_ID);
3454			ring->fw_ring_id = INVALID_HW_RING_ID;
3455			bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3456		}
3457	}
3458
3459	for (i = 0; i < bp->rx_nr_rings; i++) {
3460		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3461		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3462		u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3463
3464		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3465			hwrm_ring_free_send_msg(bp, ring,
3466						RING_FREE_REQ_RING_TYPE_RX,
3467						close_path ? cmpl_ring_id :
3468						INVALID_HW_RING_ID);
3469			ring->fw_ring_id = INVALID_HW_RING_ID;
3470			bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3471		}
3472	}
3473
3474	for (i = 0; i < bp->cp_nr_rings; i++) {
3475		struct bnxt_napi *bnapi = bp->bnapi[i];
3476		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3477		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3478
3479		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3480			hwrm_ring_free_send_msg(bp, ring,
3481						RING_FREE_REQ_RING_TYPE_CMPL,
3482						INVALID_HW_RING_ID);
3483			ring->fw_ring_id = INVALID_HW_RING_ID;
3484			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3485		}
3486	}
3487}
3488
3489int bnxt_hwrm_set_coal(struct bnxt *bp)
3490{
3491 int i, rc = 0;
3492 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3493 u16 max_buf, max_buf_irq;
3494 u16 buf_tmr, buf_tmr_irq;
3495 u32 flags;
3496
3497 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
3498 -1, -1);
3499
3500 /* Each rx completion (2 records) should be DMAed immediately */
3501 max_buf = min_t(u16, bp->coal_bufs / 4, 2);
3502 /* max_buf must not be zero */
3503 max_buf = clamp_t(u16, max_buf, 1, 63);
3504 max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
3505 buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
3506 buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
3507
3508 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3509
3510 /* RING_IDLE generates more IRQs for lower latency. Enable it only
3511 * if coal_ticks is less than 25 us.
3512 */
3513 if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
3514 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3515
3516 req.flags = cpu_to_le16(flags);
3517 req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
3518 req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
3519 req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
3520 req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
3521 req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
3522 req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
3523 req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
3524
3525 mutex_lock(&bp->hwrm_cmd_lock);
3526 for (i = 0; i < bp->cp_nr_rings; i++) {
3527 req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3528
3529 rc = _hwrm_send_message(bp, &req, sizeof(req),
3530 HWRM_CMD_TIMEOUT);
3531 if (rc)
3532 break;
3533 }
3534 mutex_unlock(&bp->hwrm_cmd_lock);
3535 return rc;
3536}
3537
3538static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3539{
3540 int rc = 0, i;
3541 struct hwrm_stat_ctx_free_input req = {0};
3542
3543 if (!bp->bnapi)
3544 return 0;
3545
3546 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3547
3548 mutex_lock(&bp->hwrm_cmd_lock);
3549 for (i = 0; i < bp->cp_nr_rings; i++) {
3550 struct bnxt_napi *bnapi = bp->bnapi[i];
3551 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3552
3553 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3554 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3555
3556 rc = _hwrm_send_message(bp, &req, sizeof(req),
3557 HWRM_CMD_TIMEOUT);
3558 if (rc)
3559 break;
3560
3561 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3562 }
3563 }
3564 mutex_unlock(&bp->hwrm_cmd_lock);
3565 return rc;
3566}
3567
3568static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3569{
3570 int rc = 0, i;
3571 struct hwrm_stat_ctx_alloc_input req = {0};
3572 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3573
3574 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3575
3576 req.update_period_ms = cpu_to_le32(1000);
3577
3578 mutex_lock(&bp->hwrm_cmd_lock);
3579 for (i = 0; i < bp->cp_nr_rings; i++) {
3580 struct bnxt_napi *bnapi = bp->bnapi[i];
3581 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3582
3583 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3584
3585 rc = _hwrm_send_message(bp, &req, sizeof(req),
3586 HWRM_CMD_TIMEOUT);
3587 if (rc)
3588 break;
3589
3590 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3591
3592 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3593 }
3594 mutex_unlock(&bp->hwrm_cmd_lock);
3595	return rc;
3596}
3597
3598int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3599{
3600 int rc = 0;
3601 struct hwrm_func_qcaps_input req = {0};
3602 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3603
3604 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3605 req.fid = cpu_to_le16(0xffff);
3606
3607 mutex_lock(&bp->hwrm_cmd_lock);
3608 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3609 if (rc)
3610 goto hwrm_func_qcaps_exit;
3611
3612 if (BNXT_PF(bp)) {
3613 struct bnxt_pf_info *pf = &bp->pf;
3614
3615 pf->fw_fid = le16_to_cpu(resp->fid);
3616 pf->port_id = le16_to_cpu(resp->port_id);
3617 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3618		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3619		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3620		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3621		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3622		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3623		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3624		if (!pf->max_hw_ring_grps)
3625			pf->max_hw_ring_grps = pf->max_tx_rings;
3626		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3627 pf->max_vnics = le16_to_cpu(resp->max_vnics);
3628 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3629 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3630 pf->max_vfs = le16_to_cpu(resp->max_vfs);
3631 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3632 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3633 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3634 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3635 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3636 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3637 } else {
3638#ifdef CONFIG_BNXT_SRIOV
3639		struct bnxt_vf_info *vf = &bp->vf;
3640
3641		vf->fw_fid = le16_to_cpu(resp->fid);
3642		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3643		if (is_valid_ether_addr(vf->mac_addr))
3644			/* overwrite netdev dev_addr with admin VF MAC */
3645			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3646		else
3647			random_ether_addr(bp->dev->dev_addr);
3648
3649		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3650		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3651		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3652		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3653		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3654		if (!vf->max_hw_ring_grps)
3655			vf->max_hw_ring_grps = vf->max_tx_rings;
3656		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3657		vf->max_vnics = le16_to_cpu(resp->max_vnics);
3658		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3659#endif
3660	}
3661
3662 bp->tx_push_thresh = 0;
3663 if (resp->flags &
3664 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3665 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3666
3667hwrm_func_qcaps_exit:
3668 mutex_unlock(&bp->hwrm_cmd_lock);
3669 return rc;
3670}
3671
3672static int bnxt_hwrm_func_reset(struct bnxt *bp)
3673{
3674 struct hwrm_func_reset_input req = {0};
3675
3676 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3677 req.enables = 0;
3678
3679 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3680}
3681
3682static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3683{
3684 int rc = 0;
3685 struct hwrm_queue_qportcfg_input req = {0};
3686 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3687 u8 i, *qptr;
3688
3689 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3690
3691 mutex_lock(&bp->hwrm_cmd_lock);
3692 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3693 if (rc)
3694 goto qportcfg_exit;
3695
3696 if (!resp->max_configurable_queues) {
3697 rc = -EINVAL;
3698 goto qportcfg_exit;
3699 }
3700 bp->max_tc = resp->max_configurable_queues;
3701 if (bp->max_tc > BNXT_MAX_QUEUE)
3702 bp->max_tc = BNXT_MAX_QUEUE;
3703
3704 qptr = &resp->queue_id0;
3705 for (i = 0; i < bp->max_tc; i++) {
3706 bp->q_info[i].queue_id = *qptr++;
3707 bp->q_info[i].queue_profile = *qptr++;
3708 }
3709
3710qportcfg_exit:
3711 mutex_unlock(&bp->hwrm_cmd_lock);
3712 return rc;
3713}
3714
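/* Query the HWRM interface and firmware versions (HWRM_VER_GET), warn if the
 * interface is older than 1.0.0, and build the version string in
 * bp->fw_ver_str.
 */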
3715static int bnxt_hwrm_ver_get(struct bnxt *bp)
3716{
3717 int rc;
3718 struct hwrm_ver_get_input req = {0};
3719 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3720
3721 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3722 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3723 req.hwrm_intf_min = HWRM_VERSION_MINOR;
3724 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3725 mutex_lock(&bp->hwrm_cmd_lock);
3726 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3727 if (rc)
3728 goto hwrm_ver_get_exit;
3729
3730 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3731
Michael Chanc1935542015-12-27 18:19:28 -05003732 if (resp->hwrm_intf_maj < 1) {
3733 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04003734 resp->hwrm_intf_maj, resp->hwrm_intf_min,
Michael Chanc1935542015-12-27 18:19:28 -05003735 resp->hwrm_intf_upd);
3736 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04003737 }
3738 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
3739 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3740 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3741
3742hwrm_ver_get_exit:
3743 mutex_unlock(&bp->hwrm_cmd_lock);
3744 return rc;
3745}
3746
3747static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3748{
3749 if (bp->vxlan_port_cnt) {
3750 bnxt_hwrm_tunnel_dst_port_free(
3751 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3752 }
3753 bp->vxlan_port_cnt = 0;
3754 if (bp->nge_port_cnt) {
3755 bnxt_hwrm_tunnel_dst_port_free(
3756 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3757 }
3758 bp->nge_port_cnt = 0;
3759}
3760
3761static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3762{
3763 int rc, i;
3764 u32 tpa_flags = 0;
3765
3766 if (set_tpa)
3767 tpa_flags = bp->flags & BNXT_FLAG_TPA;
3768 for (i = 0; i < bp->nr_vnics; i++) {
3769 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3770 if (rc) {
3771 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
3772				   i, rc);
3773 return rc;
3774 }
3775 }
3776 return 0;
3777}
3778
3779static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
3780{
3781 int i;
3782
3783 for (i = 0; i < bp->nr_vnics; i++)
3784 bnxt_hwrm_vnic_set_rss(bp, i, false);
3785}
3786
3787static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
3788 bool irq_re_init)
3789{
3790 if (bp->vnic_info) {
3791 bnxt_hwrm_clear_vnic_filter(bp);
3792		/* clear all RSS settings before freeing the vnic ctx */
3793 bnxt_hwrm_clear_vnic_rss(bp);
3794 bnxt_hwrm_vnic_ctx_free(bp);
3795		/* before freeing the vnic, undo the vnic tpa settings */
3796 if (bp->flags & BNXT_FLAG_TPA)
3797 bnxt_set_tpa(bp, false);
3798 bnxt_hwrm_vnic_free(bp);
3799 }
3800 bnxt_hwrm_ring_free(bp, close_path);
3801 bnxt_hwrm_ring_grp_free(bp);
3802 if (irq_re_init) {
3803 bnxt_hwrm_stat_ctx_free(bp);
3804 bnxt_hwrm_free_tunnel_ports(bp);
3805 }
3806}
3807
3808static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
3809{
3810 int rc;
3811
3812 /* allocate context for vnic */
3813 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
3814 if (rc) {
3815 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3816 vnic_id, rc);
3817 goto vnic_setup_err;
3818 }
3819 bp->rsscos_nr_ctxs++;
3820
3821 /* configure default vnic, ring grp */
3822 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
3823 if (rc) {
3824 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
3825 vnic_id, rc);
3826 goto vnic_setup_err;
3827 }
3828
3829 /* Enable RSS hashing on vnic */
3830 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
3831 if (rc) {
3832 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
3833 vnic_id, rc);
3834 goto vnic_setup_err;
3835 }
3836
3837 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3838 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
3839 if (rc) {
3840 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
3841 vnic_id, rc);
3842 }
3843 }
3844
3845vnic_setup_err:
3846 return rc;
3847}
3848
3849static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3850{
3851#ifdef CONFIG_RFS_ACCEL
3852 int i, rc = 0;
3853
3854 for (i = 0; i < bp->rx_nr_rings; i++) {
3855 u16 vnic_id = i + 1;
3856 u16 ring_id = i;
3857
3858 if (vnic_id >= bp->nr_vnics)
3859 break;
3860
3861 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
3862 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
3863 if (rc) {
3864 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3865 vnic_id, rc);
3866 break;
3867 }
3868 rc = bnxt_setup_vnic(bp, vnic_id);
3869 if (rc)
3870 break;
3871 }
3872 return rc;
3873#else
3874 return 0;
3875#endif
3876}
3877
Michael Chanb664f002015-12-02 01:54:08 -05003878static int bnxt_cfg_rx_mode(struct bnxt *);
3879
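/* Program the chip for operation: allocate stat contexts, rings, ring groups
 * and the default VNIC, set up RSS/HDS and the optional RFS VNICs and TPA,
 * install the unicast MAC filter for VNIC 0, and apply the RX mask and
 * coalescing settings.  On failure all firmware resources are freed again.
 */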
Michael Chanc0c050c2015-10-22 16:01:17 -04003880static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3881{
3882 int rc = 0;
3883
3884 if (irq_re_init) {
3885 rc = bnxt_hwrm_stat_ctx_alloc(bp);
3886 if (rc) {
3887 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
3888 rc);
3889 goto err_out;
3890 }
3891 }
3892
3893 rc = bnxt_hwrm_ring_alloc(bp);
3894 if (rc) {
3895 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
3896 goto err_out;
3897 }
3898
3899 rc = bnxt_hwrm_ring_grp_alloc(bp);
3900 if (rc) {
3901 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
3902 goto err_out;
3903 }
3904
3905 /* default vnic 0 */
3906 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
3907 if (rc) {
3908 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
3909 goto err_out;
3910 }
3911
3912 rc = bnxt_setup_vnic(bp, 0);
3913 if (rc)
3914 goto err_out;
3915
3916 if (bp->flags & BNXT_FLAG_RFS) {
3917 rc = bnxt_alloc_rfs_vnics(bp);
3918 if (rc)
3919 goto err_out;
3920 }
3921
3922 if (bp->flags & BNXT_FLAG_TPA) {
3923 rc = bnxt_set_tpa(bp, true);
3924 if (rc)
3925 goto err_out;
3926 }
3927
3928 if (BNXT_VF(bp))
3929 bnxt_update_vf_mac(bp);
3930
3931 /* Filter for default vnic 0 */
3932 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
3933 if (rc) {
3934 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
3935 goto err_out;
3936 }
3937 bp->vnic_info[0].uc_filter_count = 1;
3938
Michael Chanc1935542015-12-27 18:19:28 -05003939 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04003940
3941 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
3942 bp->vnic_info[0].rx_mask |=
3943 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
3944
Michael Chanb664f002015-12-02 01:54:08 -05003945 rc = bnxt_cfg_rx_mode(bp);
3946 if (rc)
Michael Chanc0c050c2015-10-22 16:01:17 -04003947 goto err_out;
Michael Chanc0c050c2015-10-22 16:01:17 -04003948
3949 rc = bnxt_hwrm_set_coal(bp);
3950 if (rc)
3951 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
3952 rc);
3953
3954 return 0;
3955
3956err_out:
3957 bnxt_hwrm_resource_free(bp, 0, true);
3958
3959 return rc;
3960}
3961
3962static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
3963{
3964 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
3965 return 0;
3966}
3967
3968static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
3969{
3970 bnxt_init_rx_rings(bp);
3971 bnxt_init_tx_rings(bp);
3972 bnxt_init_ring_grps(bp, irq_re_init);
3973 bnxt_init_vnics(bp);
3974
3975 return bnxt_init_chip(bp, irq_re_init);
3976}
3977
3978static void bnxt_disable_int(struct bnxt *bp)
3979{
3980 int i;
3981
3982 if (!bp->bnapi)
3983 return;
3984
3985 for (i = 0; i < bp->cp_nr_rings; i++) {
3986 struct bnxt_napi *bnapi = bp->bnapi[i];
3987 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3988
3989 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3990 }
3991}
3992
3993static void bnxt_enable_int(struct bnxt *bp)
3994{
3995 int i;
3996
3997 atomic_set(&bp->intr_sem, 0);
3998 for (i = 0; i < bp->cp_nr_rings; i++) {
3999 struct bnxt_napi *bnapi = bp->bnapi[i];
4000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4001
4002 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4003 }
4004}
4005
4006static int bnxt_set_real_num_queues(struct bnxt *bp)
4007{
4008 int rc;
4009 struct net_device *dev = bp->dev;
4010
4011 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4012 if (rc)
4013 return rc;
4014
4015 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4016 if (rc)
4017 return rc;
4018
4019#ifdef CONFIG_RFS_ACCEL
Michael Chan45019a12015-12-27 18:19:22 -05004020 if (bp->flags & BNXT_FLAG_RFS)
Michael Chanc0c050c2015-10-22 16:01:17 -04004021 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004022#endif
4023
4024 return rc;
4025}
4026
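/* Enable MSI-X: request one vector per completion ring, trim the RX/TX ring
 * counts to the vectors actually granted, redistribute TX rings across the
 * configured traffic classes, and fill in the per-vector IRQ table.
 */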
4027static int bnxt_setup_msix(struct bnxt *bp)
4028{
4029 struct msix_entry *msix_ent;
4030 struct net_device *dev = bp->dev;
4031 int i, total_vecs, rc = 0;
4032 const int len = sizeof(bp->irq_tbl[0].name);
4033
4034 bp->flags &= ~BNXT_FLAG_USING_MSIX;
4035 total_vecs = bp->cp_nr_rings;
4036
4037 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4038 if (!msix_ent)
4039 return -ENOMEM;
4040
4041 for (i = 0; i < total_vecs; i++) {
4042 msix_ent[i].entry = i;
4043 msix_ent[i].vector = 0;
4044 }
4045
4046 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
4047 if (total_vecs < 0) {
4048 rc = -ENODEV;
4049 goto msix_setup_exit;
4050 }
4051
4052 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4053 if (bp->irq_tbl) {
4054 int tcs;
4055
4056 /* Trim rings based upon num of vectors allocated */
4057 bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
4058 bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
4059 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4060 tcs = netdev_get_num_tc(dev);
4061 if (tcs > 1) {
4062 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4063 if (bp->tx_nr_rings_per_tc == 0) {
4064 netdev_reset_tc(dev);
4065 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4066 } else {
4067 int i, off, count;
4068
4069 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4070 for (i = 0; i < tcs; i++) {
4071 count = bp->tx_nr_rings_per_tc;
4072 off = i * count;
4073 netdev_set_tc_queue(dev, i, count, off);
4074 }
4075 }
4076 }
4077 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
4078
4079 for (i = 0; i < bp->cp_nr_rings; i++) {
4080 bp->irq_tbl[i].vector = msix_ent[i].vector;
4081 snprintf(bp->irq_tbl[i].name, len,
4082 "%s-%s-%d", dev->name, "TxRx", i);
4083 bp->irq_tbl[i].handler = bnxt_msix;
4084 }
4085 rc = bnxt_set_real_num_queues(bp);
4086 if (rc)
4087 goto msix_setup_exit;
4088 } else {
4089 rc = -ENOMEM;
4090 goto msix_setup_exit;
4091 }
4092 bp->flags |= BNXT_FLAG_USING_MSIX;
4093 kfree(msix_ent);
4094 return 0;
4095
4096msix_setup_exit:
4097 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4098 pci_disable_msix(bp->pdev);
4099 kfree(msix_ent);
4100 return rc;
4101}
4102
4103static int bnxt_setup_inta(struct bnxt *bp)
4104{
4105 int rc;
4106 const int len = sizeof(bp->irq_tbl[0].name);
4107
4108 if (netdev_get_num_tc(bp->dev))
4109 netdev_reset_tc(bp->dev);
4110
4111 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4112 if (!bp->irq_tbl) {
4113 rc = -ENOMEM;
4114 return rc;
4115 }
4116 bp->rx_nr_rings = 1;
4117 bp->tx_nr_rings = 1;
4118 bp->cp_nr_rings = 1;
4119 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4120 bp->irq_tbl[0].vector = bp->pdev->irq;
4121 snprintf(bp->irq_tbl[0].name, len,
4122 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4123 bp->irq_tbl[0].handler = bnxt_inta;
4124 rc = bnxt_set_real_num_queues(bp);
4125 return rc;
4126}
4127
4128static int bnxt_setup_int_mode(struct bnxt *bp)
4129{
4130 int rc = 0;
4131
4132 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4133 rc = bnxt_setup_msix(bp);
4134
4135 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
4136 /* fallback to INTA */
4137 rc = bnxt_setup_inta(bp);
4138 }
4139 return rc;
4140}
4141
4142static void bnxt_free_irq(struct bnxt *bp)
4143{
4144 struct bnxt_irq *irq;
4145 int i;
4146
4147#ifdef CONFIG_RFS_ACCEL
4148 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4149 bp->dev->rx_cpu_rmap = NULL;
4150#endif
4151 if (!bp->irq_tbl)
4152 return;
4153
4154 for (i = 0; i < bp->cp_nr_rings; i++) {
4155 irq = &bp->irq_tbl[i];
4156 if (irq->requested)
4157 free_irq(irq->vector, bp->bnapi[i]);
4158 irq->requested = 0;
4159 }
4160 if (bp->flags & BNXT_FLAG_USING_MSIX)
4161 pci_disable_msix(bp->pdev);
4162 kfree(bp->irq_tbl);
4163 bp->irq_tbl = NULL;
4164}
4165
4166static int bnxt_request_irq(struct bnxt *bp)
4167{
4168 int i, rc = 0;
4169 unsigned long flags = 0;
4170#ifdef CONFIG_RFS_ACCEL
4171 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4172#endif
4173
4174 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4175 flags = IRQF_SHARED;
4176
4177 for (i = 0; i < bp->cp_nr_rings; i++) {
4178 struct bnxt_irq *irq = &bp->irq_tbl[i];
4179#ifdef CONFIG_RFS_ACCEL
4180 if (rmap && (i < bp->rx_nr_rings)) {
4181 rc = irq_cpu_rmap_add(rmap, irq->vector);
4182 if (rc)
4183 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4184 i);
4185 }
4186#endif
4187 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4188 bp->bnapi[i]);
4189 if (rc)
4190 break;
4191
4192 irq->requested = 1;
4193 }
4194 return rc;
4195}
4196
4197static void bnxt_del_napi(struct bnxt *bp)
4198{
4199 int i;
4200
4201 if (!bp->bnapi)
4202 return;
4203
4204 for (i = 0; i < bp->cp_nr_rings; i++) {
4205 struct bnxt_napi *bnapi = bp->bnapi[i];
4206
4207 napi_hash_del(&bnapi->napi);
4208 netif_napi_del(&bnapi->napi);
4209 }
4210}
4211
4212static void bnxt_init_napi(struct bnxt *bp)
4213{
4214 int i;
4215 struct bnxt_napi *bnapi;
4216
4217 if (bp->flags & BNXT_FLAG_USING_MSIX) {
4218 for (i = 0; i < bp->cp_nr_rings; i++) {
4219 bnapi = bp->bnapi[i];
4220 netif_napi_add(bp->dev, &bnapi->napi,
4221 bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04004222 }
4223 } else {
4224 bnapi = bp->bnapi[0];
4225 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04004226 }
4227}
4228
4229static void bnxt_disable_napi(struct bnxt *bp)
4230{
4231 int i;
4232
4233 if (!bp->bnapi)
4234 return;
4235
4236 for (i = 0; i < bp->cp_nr_rings; i++) {
4237 napi_disable(&bp->bnapi[i]->napi);
4238 bnxt_disable_poll(bp->bnapi[i]);
4239 }
4240}
4241
4242static void bnxt_enable_napi(struct bnxt *bp)
4243{
4244 int i;
4245
4246 for (i = 0; i < bp->cp_nr_rings; i++) {
4247 bnxt_enable_poll(bp->bnapi[i]);
4248 napi_enable(&bp->bnapi[i]->napi);
4249 }
4250}
4251
4252static void bnxt_tx_disable(struct bnxt *bp)
4253{
4254 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04004255 struct bnxt_tx_ring_info *txr;
4256 struct netdev_queue *txq;
4257
Michael Chanb6ab4b02016-01-02 23:44:59 -05004258 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004259 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05004260 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04004261 txq = netdev_get_tx_queue(bp->dev, i);
4262 __netif_tx_lock(txq, smp_processor_id());
4263 txr->dev_state = BNXT_DEV_STATE_CLOSING;
4264 __netif_tx_unlock(txq);
4265 }
4266 }
4267 /* Stop all TX queues */
4268 netif_tx_disable(bp->dev);
4269 netif_carrier_off(bp->dev);
4270}
4271
4272static void bnxt_tx_enable(struct bnxt *bp)
4273{
4274 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04004275 struct bnxt_tx_ring_info *txr;
4276 struct netdev_queue *txq;
4277
4278 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05004279 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04004280 txq = netdev_get_tx_queue(bp->dev, i);
4281 txr->dev_state = 0;
4282 }
4283 netif_tx_wake_all_queues(bp->dev);
4284 if (bp->link_info.link_up)
4285 netif_carrier_on(bp->dev);
4286}
4287
4288static void bnxt_report_link(struct bnxt *bp)
4289{
4290 if (bp->link_info.link_up) {
4291 const char *duplex;
4292 const char *flow_ctrl;
4293 u16 speed;
4294
4295 netif_carrier_on(bp->dev);
4296 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4297 duplex = "full";
4298 else
4299 duplex = "half";
4300 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4301 flow_ctrl = "ON - receive & transmit";
4302 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4303 flow_ctrl = "ON - transmit";
4304 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4305 flow_ctrl = "ON - receive";
4306 else
4307 flow_ctrl = "none";
4308 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4309 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4310 speed, duplex, flow_ctrl);
4311 } else {
4312 netif_carrier_off(bp->dev);
4313 netdev_err(bp->dev, "NIC Link is Down\n");
4314 }
4315}
4316
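/* Query the current PHY/link state (HWRM_PORT_PHY_QCFG) and copy it into
 * bp->link_info.  When chng_link_state is set, link_up is updated and a link
 * change is reported if the state differs from the previous one.
 */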
4317static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4318{
4319 int rc = 0;
4320 struct bnxt_link_info *link_info = &bp->link_info;
4321 struct hwrm_port_phy_qcfg_input req = {0};
4322 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4323 u8 link_up = link_info->link_up;
4324
4325 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4326
4327 mutex_lock(&bp->hwrm_cmd_lock);
4328 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4329 if (rc) {
4330 mutex_unlock(&bp->hwrm_cmd_lock);
4331 return rc;
4332 }
4333
4334 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4335 link_info->phy_link_status = resp->link;
4336 link_info->duplex = resp->duplex;
4337 link_info->pause = resp->pause;
4338 link_info->auto_mode = resp->auto_mode;
4339 link_info->auto_pause_setting = resp->auto_pause;
4340 link_info->force_pause_setting = resp->force_pause;
Michael Chanc1935542015-12-27 18:19:28 -05004341 link_info->duplex_setting = resp->duplex;
Michael Chanc0c050c2015-10-22 16:01:17 -04004342 if (link_info->phy_link_status == BNXT_LINK_LINK)
4343 link_info->link_speed = le16_to_cpu(resp->link_speed);
4344 else
4345 link_info->link_speed = 0;
4346 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4347 link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4348 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4349 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4350 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4351 link_info->phy_ver[0] = resp->phy_maj;
4352 link_info->phy_ver[1] = resp->phy_min;
4353 link_info->phy_ver[2] = resp->phy_bld;
4354 link_info->media_type = resp->media_type;
4355 link_info->transceiver = resp->transceiver_type;
4356 link_info->phy_addr = resp->phy_addr;
4357
4358 /* TODO: need to add more logic to report VF link */
4359 if (chng_link_state) {
4360 if (link_info->phy_link_status == BNXT_LINK_LINK)
4361 link_info->link_up = 1;
4362 else
4363 link_info->link_up = 0;
4364 if (link_up != link_info->link_up)
4365 bnxt_report_link(bp);
4366 } else {
4367		/* always link down if not required to update link state */
4368 link_info->link_up = 0;
4369 }
4370 mutex_unlock(&bp->hwrm_cmd_lock);
4371 return 0;
4372}
4373
4374static void
4375bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4376{
4377 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4378 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4379 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4380 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4381			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4382 req->enables |=
4383 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4384 } else {
4385 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4386 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4387 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4388 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4389 req->enables |=
4390 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4391 }
4392}
4393
4394static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4395 struct hwrm_port_phy_cfg_input *req)
4396{
4397 u8 autoneg = bp->link_info.autoneg;
4398 u16 fw_link_speed = bp->link_info.req_link_speed;
4399 u32 advertising = bp->link_info.advertising;
4400
4401 if (autoneg & BNXT_AUTONEG_SPEED) {
4402 req->auto_mode |=
4403 PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
4404
4405 req->enables |= cpu_to_le32(
4406 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4407 req->auto_link_speed_mask = cpu_to_le16(advertising);
4408
4409 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4410 req->flags |=
4411 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4412 } else {
4413 req->force_link_speed = cpu_to_le16(fw_link_speed);
4414 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4415 }
4416
4417 /* currently don't support half duplex */
4418 req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4419 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4420 /* tell chimp that the setting takes effect immediately */
4421 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4422}
4423
4424int bnxt_hwrm_set_pause(struct bnxt *bp)
4425{
4426 struct hwrm_port_phy_cfg_input req = {0};
4427 int rc;
4428
4429 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4430 bnxt_hwrm_set_pause_common(bp, &req);
4431
4432 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4433 bp->link_info.force_link_chng)
4434 bnxt_hwrm_set_link_common(bp, &req);
4435
4436 mutex_lock(&bp->hwrm_cmd_lock);
4437 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4438 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4439		/* Changing the pause setting doesn't trigger any link change
4440		 * event, so the driver needs to update the current pause result
4441		 * upon successful return of the phy_cfg command.
4442 */
4443 bp->link_info.pause =
4444 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4445 bp->link_info.auto_pause_setting = 0;
4446 if (!bp->link_info.force_link_chng)
4447 bnxt_report_link(bp);
4448 }
4449 bp->link_info.force_link_chng = false;
4450 mutex_unlock(&bp->hwrm_cmd_lock);
4451 return rc;
4452}
4453
4454int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4455{
4456 struct hwrm_port_phy_cfg_input req = {0};
4457
4458 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4459 if (set_pause)
4460 bnxt_hwrm_set_pause_common(bp, &req);
4461
4462 bnxt_hwrm_set_link_common(bp, &req);
4463 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4464}
4465
4466static int bnxt_update_phy_setting(struct bnxt *bp)
4467{
4468 int rc;
4469 bool update_link = false;
4470 bool update_pause = false;
4471 struct bnxt_link_info *link_info = &bp->link_info;
4472
4473 rc = bnxt_update_link(bp, true);
4474 if (rc) {
4475 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4476 rc);
4477 return rc;
4478 }
4479 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4480 link_info->auto_pause_setting != link_info->req_flow_ctrl)
4481 update_pause = true;
4482 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4483 link_info->force_pause_setting != link_info->req_flow_ctrl)
4484 update_pause = true;
4485 if (link_info->req_duplex != link_info->duplex_setting)
4486 update_link = true;
4487 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4488 if (BNXT_AUTO_MODE(link_info->auto_mode))
4489 update_link = true;
4490 if (link_info->req_link_speed != link_info->force_link_speed)
4491 update_link = true;
4492 } else {
4493 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4494 update_link = true;
4495 if (link_info->advertising != link_info->auto_link_speeds)
4496 update_link = true;
4497 if (link_info->req_link_speed != link_info->auto_link_speed)
4498 update_link = true;
4499 }
4500
4501 if (update_link)
4502 rc = bnxt_hwrm_set_link_setting(bp, update_pause);
4503 else if (update_pause)
4504 rc = bnxt_hwrm_set_pause(bp);
4505 if (rc) {
4506 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4507 rc);
4508 return rc;
4509 }
4510
4511 return rc;
4512}
4513
Jeffrey Huang11809492015-11-05 16:25:49 -05004514/* Common routine to pre-map certain register block to different GRC window.
4515 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
4516 * in the PF and 3 windows in the VF can be customized to map different
4517 * register blocks.
4518 */
4519static void bnxt_preset_reg_win(struct bnxt *bp)
4520{
4521 if (BNXT_PF(bp)) {
4522 /* CAG registers map to GRC window #4 */
4523 writel(BNXT_CAG_REG_BASE,
4524 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
4525 }
4526}
4527
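/* Bring the NIC up: set up the interrupt mode (when irq_re_init), allocate
 * memory, NAPI contexts and IRQs, initialize the rings and chip state,
 * optionally re-apply the PHY settings and tunnel ports, then enable
 * interrupts, TX queues and the periodic timer.
 */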
Michael Chanc0c050c2015-10-22 16:01:17 -04004528static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4529{
4530 int rc = 0;
4531
Jeffrey Huang11809492015-11-05 16:25:49 -05004532 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04004533 netif_carrier_off(bp->dev);
4534 if (irq_re_init) {
4535 rc = bnxt_setup_int_mode(bp);
4536 if (rc) {
4537 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4538 rc);
4539 return rc;
4540 }
4541 }
4542 if ((bp->flags & BNXT_FLAG_RFS) &&
4543 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4544 /* disable RFS if falling back to INTA */
4545 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4546 bp->flags &= ~BNXT_FLAG_RFS;
4547 }
4548
4549 rc = bnxt_alloc_mem(bp, irq_re_init);
4550 if (rc) {
4551 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4552 goto open_err_free_mem;
4553 }
4554
4555 if (irq_re_init) {
4556 bnxt_init_napi(bp);
4557 rc = bnxt_request_irq(bp);
4558 if (rc) {
4559 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4560 goto open_err;
4561 }
4562 }
4563
4564 bnxt_enable_napi(bp);
4565
4566 rc = bnxt_init_nic(bp, irq_re_init);
4567 if (rc) {
4568 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4569 goto open_err;
4570 }
4571
4572 if (link_re_init) {
4573 rc = bnxt_update_phy_setting(bp);
4574 if (rc)
4575 goto open_err;
4576 }
4577
4578 if (irq_re_init) {
4579#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4580 vxlan_get_rx_port(bp->dev);
4581#endif
4582 if (!bnxt_hwrm_tunnel_dst_port_alloc(
4583 bp, htons(0x17c1),
4584 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4585 bp->nge_port_cnt = 1;
4586 }
4587
Michael Chancaefe522015-12-09 19:35:42 -05004588 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04004589 bnxt_enable_int(bp);
4590 /* Enable TX queues */
4591 bnxt_tx_enable(bp);
4592 mod_timer(&bp->timer, jiffies + bp->current_interval);
4593
4594 return 0;
4595
4596open_err:
4597 bnxt_disable_napi(bp);
4598 bnxt_del_napi(bp);
4599
4600open_err_free_mem:
4601 bnxt_free_skbs(bp);
4602 bnxt_free_irq(bp);
4603 bnxt_free_mem(bp, true);
4604 return rc;
4605}
4606
4607/* rtnl_lock held */
4608int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4609{
4610 int rc = 0;
4611
4612 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4613 if (rc) {
4614 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4615 dev_close(bp->dev);
4616 }
4617 return rc;
4618}
4619
4620static int bnxt_open(struct net_device *dev)
4621{
4622 struct bnxt *bp = netdev_priv(dev);
4623 int rc = 0;
4624
4625 rc = bnxt_hwrm_func_reset(bp);
4626 if (rc) {
4627 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4628 rc);
4629 rc = -1;
4630 return rc;
4631 }
4632 return __bnxt_open_nic(bp, true, true);
4633}
4634
4635static void bnxt_disable_int_sync(struct bnxt *bp)
4636{
4637 int i;
4638
4639 atomic_inc(&bp->intr_sem);
4640 if (!netif_running(bp->dev))
4641 return;
4642
4643 bnxt_disable_int(bp);
4644 for (i = 0; i < bp->cp_nr_rings; i++)
4645 synchronize_irq(bp->irq_tbl[i].vector);
4646}
4647
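/* Tear down in roughly the reverse order of open: stop TX, clear
 * BNXT_STATE_OPEN and wait for bnxt_sp_task() to finish, free the firmware
 * resources, then disable NAPI and interrupts, stop the timer and release
 * SKBs, IRQs and memory.
 */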
4648int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4649{
4650 int rc = 0;
4651
4652#ifdef CONFIG_BNXT_SRIOV
4653 if (bp->sriov_cfg) {
4654 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4655 !bp->sriov_cfg,
4656 BNXT_SRIOV_CFG_WAIT_TMO);
4657 if (rc)
4658 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4659 }
4660#endif
4661	/* Change device state to avoid TX queue wake-ups */
4662 bnxt_tx_disable(bp);
4663
Michael Chancaefe522015-12-09 19:35:42 -05004664 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05004665 smp_mb__after_atomic();
4666 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4667 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04004668
4669 /* Flush rings before disabling interrupts */
4670 bnxt_shutdown_nic(bp, irq_re_init);
4671
4672 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4673
4674 bnxt_disable_napi(bp);
4675 bnxt_disable_int_sync(bp);
4676 del_timer_sync(&bp->timer);
4677 bnxt_free_skbs(bp);
4678
4679 if (irq_re_init) {
4680 bnxt_free_irq(bp);
4681 bnxt_del_napi(bp);
4682 }
4683 bnxt_free_mem(bp, irq_re_init);
4684 return rc;
4685}
4686
4687static int bnxt_close(struct net_device *dev)
4688{
4689 struct bnxt *bp = netdev_priv(dev);
4690
4691 bnxt_close_nic(bp, true, true);
4692 return 0;
4693}
4694
4695/* rtnl_lock held */
4696static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4697{
4698 switch (cmd) {
4699 case SIOCGMIIPHY:
4700 /* fallthru */
4701 case SIOCGMIIREG: {
4702 if (!netif_running(dev))
4703 return -EAGAIN;
4704
4705 return 0;
4706 }
4707
4708 case SIOCSMIIREG:
4709 if (!netif_running(dev))
4710 return -EAGAIN;
4711
4712 return 0;
4713
4714 default:
4715 /* do nothing */
4716 break;
4717 }
4718 return -EOPNOTSUPP;
4719}
4720
4721static struct rtnl_link_stats64 *
4722bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4723{
4724 u32 i;
4725 struct bnxt *bp = netdev_priv(dev);
4726
4727 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4728
4729 if (!bp->bnapi)
4730 return stats;
4731
4732 /* TODO check if we need to synchronize with bnxt_close path */
4733 for (i = 0; i < bp->cp_nr_rings; i++) {
4734 struct bnxt_napi *bnapi = bp->bnapi[i];
4735 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4736 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
4737
4738 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
4739 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
4740 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
4741
4742 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
4743 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
4744 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
4745
4746 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
4747 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
4748 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
4749
4750 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
4751 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
4752 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
4753
4754 stats->rx_missed_errors +=
4755 le64_to_cpu(hw_stats->rx_discard_pkts);
4756
4757 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
4758
4759 stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
4760
4761 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
4762 }
4763
4764 return stats;
4765}
4766
4767static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
4768{
4769 struct net_device *dev = bp->dev;
4770 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4771 struct netdev_hw_addr *ha;
4772 u8 *haddr;
4773 int mc_count = 0;
4774 bool update = false;
4775 int off = 0;
4776
4777 netdev_for_each_mc_addr(ha, dev) {
4778 if (mc_count >= BNXT_MAX_MC_ADDRS) {
4779 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4780 vnic->mc_list_count = 0;
4781 return false;
4782 }
4783 haddr = ha->addr;
4784 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
4785 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
4786 update = true;
4787 }
4788 off += ETH_ALEN;
4789 mc_count++;
4790 }
4791 if (mc_count)
4792 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
4793
4794 if (mc_count != vnic->mc_list_count) {
4795 vnic->mc_list_count = mc_count;
4796 update = true;
4797 }
4798 return update;
4799}
4800
4801static bool bnxt_uc_list_updated(struct bnxt *bp)
4802{
4803 struct net_device *dev = bp->dev;
4804 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4805 struct netdev_hw_addr *ha;
4806 int off = 0;
4807
4808 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
4809 return true;
4810
4811 netdev_for_each_uc_addr(ha, dev) {
4812 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
4813 return true;
4814
4815 off += ETH_ALEN;
4816 }
4817 return false;
4818}
4819
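/* ndo_set_rx_mode handler.  Runs in atomic context, so it only recomputes the
 * RX mask and checks the UC/MC lists for changes; the HWRM calls needed to
 * apply them are deferred to bnxt_sp_task().
 */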
4820static void bnxt_set_rx_mode(struct net_device *dev)
4821{
4822 struct bnxt *bp = netdev_priv(dev);
4823 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4824 u32 mask = vnic->rx_mask;
4825 bool mc_update = false;
4826 bool uc_update;
4827
4828 if (!netif_running(dev))
4829 return;
4830
4831 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
4832 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
4833 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
4834
4835 /* Only allow PF to be in promiscuous mode */
4836 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4837 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4838
4839 uc_update = bnxt_uc_list_updated(bp);
4840
4841 if (dev->flags & IFF_ALLMULTI) {
4842 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4843 vnic->mc_list_count = 0;
4844 } else {
4845 mc_update = bnxt_mc_list_updated(bp, &mask);
4846 }
4847
4848 if (mask != vnic->rx_mask || uc_update || mc_update) {
4849 vnic->rx_mask = mask;
4850
4851 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
4852 schedule_work(&bp->sp_task);
4853 }
4854}
4855
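/* Apply the RX mode changes requested by bnxt_set_rx_mode() from process
 * context: free the stale unicast L2 filters, re-add the current UC list
 * (falling back to promiscuous mode if there are more addresses than
 * filters), then push the new RX mask to the firmware.
 */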
Michael Chanb664f002015-12-02 01:54:08 -05004856static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004857{
4858 struct net_device *dev = bp->dev;
4859 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4860 struct netdev_hw_addr *ha;
4861 int i, off = 0, rc;
4862 bool uc_update;
4863
4864 netif_addr_lock_bh(dev);
4865 uc_update = bnxt_uc_list_updated(bp);
4866 netif_addr_unlock_bh(dev);
4867
4868 if (!uc_update)
4869 goto skip_uc;
4870
4871 mutex_lock(&bp->hwrm_cmd_lock);
4872 for (i = 1; i < vnic->uc_filter_count; i++) {
4873 struct hwrm_cfa_l2_filter_free_input req = {0};
4874
4875 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
4876 -1);
4877
4878 req.l2_filter_id = vnic->fw_l2_filter_id[i];
4879
4880 rc = _hwrm_send_message(bp, &req, sizeof(req),
4881 HWRM_CMD_TIMEOUT);
4882 }
4883 mutex_unlock(&bp->hwrm_cmd_lock);
4884
4885 vnic->uc_filter_count = 1;
4886
4887 netif_addr_lock_bh(dev);
4888 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
4889 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4890 } else {
4891 netdev_for_each_uc_addr(ha, dev) {
4892 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
4893 off += ETH_ALEN;
4894 vnic->uc_filter_count++;
4895 }
4896 }
4897 netif_addr_unlock_bh(dev);
4898
4899 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
4900 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
4901 if (rc) {
4902 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4903 rc);
4904 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05004905 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04004906 }
4907 }
4908
4909skip_uc:
4910 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
4911 if (rc)
4912 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
4913 rc);
Michael Chanb664f002015-12-02 01:54:08 -05004914
4915 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04004916}
4917
Michael Chan2bcfa6f2015-12-27 18:19:24 -05004918static bool bnxt_rfs_capable(struct bnxt *bp)
4919{
4920#ifdef CONFIG_RFS_ACCEL
4921 struct bnxt_pf_info *pf = &bp->pf;
4922 int vnics;
4923
4924 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
4925 return false;
4926
4927 vnics = 1 + bp->rx_nr_rings;
4928 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
4929 return false;
4930
4931 return true;
4932#else
4933 return false;
4934#endif
4935}
4936
Michael Chanc0c050c2015-10-22 16:01:17 -04004937static netdev_features_t bnxt_fix_features(struct net_device *dev,
4938 netdev_features_t features)
4939{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05004940 struct bnxt *bp = netdev_priv(dev);
4941
4942 if (!bnxt_rfs_capable(bp))
4943 features &= ~NETIF_F_NTUPLE;
Michael Chanc0c050c2015-10-22 16:01:17 -04004944 return features;
4945}
4946
4947static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
4948{
4949 struct bnxt *bp = netdev_priv(dev);
4950 u32 flags = bp->flags;
4951 u32 changes;
4952 int rc = 0;
4953 bool re_init = false;
4954 bool update_tpa = false;
4955
4956 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
4957 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
4958 flags |= BNXT_FLAG_GRO;
4959 if (features & NETIF_F_LRO)
4960 flags |= BNXT_FLAG_LRO;
4961
4962 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4963 flags |= BNXT_FLAG_STRIP_VLAN;
4964
4965 if (features & NETIF_F_NTUPLE)
4966 flags |= BNXT_FLAG_RFS;
4967
4968 changes = flags ^ bp->flags;
4969 if (changes & BNXT_FLAG_TPA) {
4970 update_tpa = true;
4971 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
4972 (flags & BNXT_FLAG_TPA) == 0)
4973 re_init = true;
4974 }
4975
4976 if (changes & ~BNXT_FLAG_TPA)
4977 re_init = true;
4978
4979 if (flags != bp->flags) {
4980 u32 old_flags = bp->flags;
4981
4982 bp->flags = flags;
4983
Michael Chan2bcfa6f2015-12-27 18:19:24 -05004984 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004985 if (update_tpa)
4986 bnxt_set_ring_params(bp);
4987 return rc;
4988 }
4989
4990 if (re_init) {
4991 bnxt_close_nic(bp, false, false);
4992 if (update_tpa)
4993 bnxt_set_ring_params(bp);
4994
4995 return bnxt_open_nic(bp, false, false);
4996 }
4997 if (update_tpa) {
4998 rc = bnxt_set_tpa(bp,
4999 (flags & BNXT_FLAG_TPA) ?
5000 true : false);
5001 if (rc)
5002 bp->flags = old_flags;
5003 }
5004 }
5005 return rc;
5006}
5007
Michael Chan9f554592016-01-02 23:44:58 -05005008static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
5009{
Michael Chanb6ab4b02016-01-02 23:44:59 -05005010 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05005011 int i = bnapi->index;
5012
Michael Chan3b2b7d92016-01-02 23:45:00 -05005013 if (!txr)
5014 return;
5015
Michael Chan9f554592016-01-02 23:44:58 -05005016 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5017 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
5018 txr->tx_cons);
5019}
5020
5021static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
5022{
Michael Chanb6ab4b02016-01-02 23:44:59 -05005023 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05005024 int i = bnapi->index;
5025
Michael Chan3b2b7d92016-01-02 23:45:00 -05005026 if (!rxr)
5027 return;
5028
Michael Chan9f554592016-01-02 23:44:58 -05005029 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5030 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
5031 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
5032 rxr->rx_sw_agg_prod);
5033}
5034
5035static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
5036{
5037 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5038 int i = bnapi->index;
5039
5040 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5041 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
5042}
5043
Michael Chanc0c050c2015-10-22 16:01:17 -04005044static void bnxt_dbg_dump_states(struct bnxt *bp)
5045{
5046 int i;
5047 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04005048
5049 for (i = 0; i < bp->cp_nr_rings; i++) {
5050 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005051 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05005052 bnxt_dump_tx_sw_state(bnapi);
5053 bnxt_dump_rx_sw_state(bnapi);
5054 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04005055 }
5056 }
5057}
5058
5059static void bnxt_reset_task(struct bnxt *bp)
5060{
5061 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05005062 if (netif_running(bp->dev)) {
5063 bnxt_close_nic(bp, false, false);
5064 bnxt_open_nic(bp, false, false);
5065 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005066}
5067
5068static void bnxt_tx_timeout(struct net_device *dev)
5069{
5070 struct bnxt *bp = netdev_priv(dev);
5071
5072 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5073 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5074 schedule_work(&bp->sp_task);
5075}
5076
5077#ifdef CONFIG_NET_POLL_CONTROLLER
5078static void bnxt_poll_controller(struct net_device *dev)
5079{
5080 struct bnxt *bp = netdev_priv(dev);
5081 int i;
5082
5083 for (i = 0; i < bp->cp_nr_rings; i++) {
5084 struct bnxt_irq *irq = &bp->irq_tbl[i];
5085
5086 disable_irq(irq->vector);
5087 irq->handler(irq->vector, bp->bnapi[i]);
5088 enable_irq(irq->vector);
5089 }
5090}
5091#endif
5092
5093static void bnxt_timer(unsigned long data)
5094{
5095 struct bnxt *bp = (struct bnxt *)data;
5096 struct net_device *dev = bp->dev;
5097
5098 if (!netif_running(dev))
5099 return;
5100
5101 if (atomic_read(&bp->intr_sem) != 0)
5102 goto bnxt_restart_timer;
5103
5104bnxt_restart_timer:
5105 mod_timer(&bp->timer, jiffies + bp->current_interval);
5106}
5107
5108static void bnxt_cfg_ntp_filters(struct bnxt *);
5109
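/* Deferred work handler for slow-path events (RX mode, ntuple filters, link
 * changes, tunnel ports, resets).  BNXT_STATE_IN_SP_TASK is set while it runs
 * so that bnxt_close_nic() can wait for it to finish.
 */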
5110static void bnxt_sp_task(struct work_struct *work)
5111{
5112 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5113 int rc;
5114
Michael Chan4cebdce2015-12-09 19:35:43 -05005115 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5116 smp_mb__after_atomic();
5117 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5118 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005119 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05005120 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005121
5122 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5123 bnxt_cfg_rx_mode(bp);
5124
5125 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5126 bnxt_cfg_ntp_filters(bp);
5127 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5128 rc = bnxt_update_link(bp, true);
5129 if (rc)
5130 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5131 rc);
5132 }
5133 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5134 bnxt_hwrm_exec_fwd_req(bp);
5135 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5136 bnxt_hwrm_tunnel_dst_port_alloc(
5137 bp, bp->vxlan_port,
5138 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5139 }
5140 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5141 bnxt_hwrm_tunnel_dst_port_free(
5142 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5143 }
Michael Chan028de142015-12-09 19:35:44 -05005144 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5145 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5146 * for BNXT_STATE_IN_SP_TASK to clear.
5147 */
5148 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5149 rtnl_lock();
Michael Chanc0c050c2015-10-22 16:01:17 -04005150 bnxt_reset_task(bp);
Michael Chan028de142015-12-09 19:35:44 -05005151 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5152 rtnl_unlock();
5153 }
Michael Chan4cebdce2015-12-09 19:35:43 -05005154
5155 smp_mb__before_atomic();
5156 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005157}
5158
5159static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5160{
5161 int rc;
5162 struct bnxt *bp = netdev_priv(dev);
5163
5164 SET_NETDEV_DEV(dev, &pdev->dev);
5165
5166 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5167 rc = pci_enable_device(pdev);
5168 if (rc) {
5169 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5170 goto init_err;
5171 }
5172
5173 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5174 dev_err(&pdev->dev,
5175 "Cannot find PCI device base address, aborting\n");
5176 rc = -ENODEV;
5177 goto init_err_disable;
5178 }
5179
5180 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5181 if (rc) {
5182 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5183 goto init_err_disable;
5184 }
5185
5186 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5187 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5188		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;	/* rc is still 0 here; return a real error */
5189		goto init_err_disable;
5190 }
5191
5192 pci_set_master(pdev);
5193
5194 bp->dev = dev;
5195 bp->pdev = pdev;
5196
5197 bp->bar0 = pci_ioremap_bar(pdev, 0);
5198 if (!bp->bar0) {
5199 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5200 rc = -ENOMEM;
5201 goto init_err_release;
5202 }
5203
5204 bp->bar1 = pci_ioremap_bar(pdev, 2);
5205 if (!bp->bar1) {
5206 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5207 rc = -ENOMEM;
5208 goto init_err_release;
5209 }
5210
5211 bp->bar2 = pci_ioremap_bar(pdev, 4);
5212 if (!bp->bar2) {
5213 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5214 rc = -ENOMEM;
5215 goto init_err_release;
5216 }
5217
5218 INIT_WORK(&bp->sp_task, bnxt_sp_task);
5219
5220 spin_lock_init(&bp->ntp_fltr_lock);
5221
5222 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5223 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5224
5225 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
5226 bp->coal_bufs = 20;
5227 bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
5228 bp->coal_bufs_irq = 2;
5229
5230 init_timer(&bp->timer);
5231 bp->timer.data = (unsigned long)bp;
5232 bp->timer.function = bnxt_timer;
5233 bp->current_interval = BNXT_TIMER_INTERVAL;
5234
Michael Chancaefe522015-12-09 19:35:42 -05005235 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005236
5237 return 0;
5238
5239init_err_release:
5240 if (bp->bar2) {
5241 pci_iounmap(pdev, bp->bar2);
5242 bp->bar2 = NULL;
5243 }
5244
5245 if (bp->bar1) {
5246 pci_iounmap(pdev, bp->bar1);
5247 bp->bar1 = NULL;
5248 }
5249
5250 if (bp->bar0) {
5251 pci_iounmap(pdev, bp->bar0);
5252 bp->bar0 = NULL;
5253 }
5254
5255 pci_release_regions(pdev);
5256
5257init_err_disable:
5258 pci_disable_device(pdev);
5259
5260init_err:
5261 return rc;
5262}
5263
5264/* rtnl_lock held */
5265static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5266{
5267 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05005268 struct bnxt *bp = netdev_priv(dev);
5269 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04005270
5271 if (!is_valid_ether_addr(addr->sa_data))
5272 return -EADDRNOTAVAIL;
5273
Jeffrey Huangbdd43472015-12-02 01:54:07 -05005274#ifdef CONFIG_BNXT_SRIOV
5275 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5276 return -EADDRNOTAVAIL;
5277#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04005278
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05005279 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5280 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04005281
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05005282 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5283 if (netif_running(dev)) {
5284 bnxt_close_nic(bp, false, false);
5285 rc = bnxt_open_nic(bp, false, false);
5286 }
5287
5288 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005289}
5290
5291/* rtnl_lock held */
5292static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5293{
5294 struct bnxt *bp = netdev_priv(dev);
5295
5296 if (new_mtu < 60 || new_mtu > 9000)
5297 return -EINVAL;
5298
5299 if (netif_running(dev))
5300 bnxt_close_nic(bp, false, false);
5301
5302 dev->mtu = new_mtu;
5303 bnxt_set_ring_params(bp);
5304
5305 if (netif_running(dev))
5306 return bnxt_open_nic(bp, false, false);
5307
5308 return 0;
5309}
5310
5311static int bnxt_setup_tc(struct net_device *dev, u8 tc)
5312{
5313 struct bnxt *bp = netdev_priv(dev);
5314
5315 if (tc > bp->max_tc) {
5316		netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
5317 tc, bp->max_tc);
5318 return -EINVAL;
5319 }
5320
5321 if (netdev_get_num_tc(dev) == tc)
5322 return 0;
5323
5324 if (tc) {
5325 int max_rx_rings, max_tx_rings;
5326
5327 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5328 if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5329 return -ENOMEM;
5330 }
5331
5332 /* Needs to close the device and do hw resource re-allocations */
5333 if (netif_running(bp->dev))
5334 bnxt_close_nic(bp, true, false);
5335
5336 if (tc) {
5337 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5338 netdev_set_num_tc(dev, tc);
5339 } else {
5340 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5341 netdev_reset_tc(dev);
5342 }
5343 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5344 bp->num_stat_ctxs = bp->cp_nr_rings;
5345
5346 if (netif_running(bp->dev))
5347 return bnxt_open_nic(bp, true, false);
5348
5349 return 0;
5350}
5351
5352#ifdef CONFIG_RFS_ACCEL
5353static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5354 struct bnxt_ntuple_filter *f2)
5355{
5356 struct flow_keys *keys1 = &f1->fkeys;
5357 struct flow_keys *keys2 = &f2->fkeys;
5358
5359 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5360 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5361 keys1->ports.ports == keys2->ports.ports &&
5362 keys1->basic.ip_proto == keys2->basic.ip_proto &&
5363 keys1->basic.n_proto == keys2->basic.n_proto &&
5364 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5365 return true;
5366
5367 return false;
5368}
5369
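/* aRFS ndo_rx_flow_steer handler: dissect the flow, ignore flows already in
 * the hash table, reserve a software filter ID and queue the new filter so
 * that bnxt_sp_task() programs it into the hardware.
 */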
5370static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5371 u16 rxq_index, u32 flow_id)
5372{
5373 struct bnxt *bp = netdev_priv(dev);
5374 struct bnxt_ntuple_filter *fltr, *new_fltr;
5375 struct flow_keys *fkeys;
5376 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chan84e86b92015-11-05 16:25:50 -05005377 int rc = 0, idx, bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04005378 struct hlist_head *head;
5379
5380 if (skb->encapsulation)
5381 return -EPROTONOSUPPORT;
5382
5383 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5384 if (!new_fltr)
5385 return -ENOMEM;
5386
5387 fkeys = &new_fltr->fkeys;
5388 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5389 rc = -EPROTONOSUPPORT;
5390 goto err_free;
5391 }
5392
5393 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5394 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5395 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5396 rc = -EPROTONOSUPPORT;
5397 goto err_free;
5398 }
5399
5400 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5401
5402 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5403 head = &bp->ntp_fltr_hash_tbl[idx];
5404 rcu_read_lock();
5405 hlist_for_each_entry_rcu(fltr, head, hash) {
5406 if (bnxt_fltr_match(fltr, new_fltr)) {
5407 rcu_read_unlock();
5408 rc = 0;
5409 goto err_free;
5410 }
5411 }
5412 rcu_read_unlock();
5413
5414 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05005415 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5416 BNXT_NTP_FLTR_MAX_FLTR, 0);
5417 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005418 spin_unlock_bh(&bp->ntp_fltr_lock);
5419 rc = -ENOMEM;
5420 goto err_free;
5421 }
5422
Michael Chan84e86b92015-11-05 16:25:50 -05005423 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04005424 new_fltr->flow_id = flow_id;
5425 new_fltr->rxq = rxq_index;
5426 hlist_add_head_rcu(&new_fltr->hash, head);
5427 bp->ntp_fltr_count++;
5428 spin_unlock_bh(&bp->ntp_fltr_lock);
5429
5430 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5431 schedule_work(&bp->sp_task);
5432
5433 return new_fltr->sw_id;
5434
5435err_free:
5436 kfree(new_fltr);
5437 return rc;
5438}
5439
5440static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5441{
5442 int i;
5443
5444 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5445 struct hlist_head *head;
5446 struct hlist_node *tmp;
5447 struct bnxt_ntuple_filter *fltr;
5448 int rc;
5449
5450 head = &bp->ntp_fltr_hash_tbl[i];
5451 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5452 bool del = false;
5453
5454 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5455 if (rps_may_expire_flow(bp->dev, fltr->rxq,
5456 fltr->flow_id,
5457 fltr->sw_id)) {
5458 bnxt_hwrm_cfa_ntuple_filter_free(bp,
5459 fltr);
5460 del = true;
5461 }
5462 } else {
5463 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5464 fltr);
5465 if (rc)
5466 del = true;
5467 else
5468 set_bit(BNXT_FLTR_VALID, &fltr->state);
5469 }
5470
5471 if (del) {
5472 spin_lock_bh(&bp->ntp_fltr_lock);
5473 hlist_del_rcu(&fltr->hash);
5474 bp->ntp_fltr_count--;
5475 spin_unlock_bh(&bp->ntp_fltr_lock);
5476 synchronize_rcu();
5477 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5478 kfree(fltr);
5479 }
5480 }
5481 }
5482}
5483
5484#else
5485
5486static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5487{
5488}
5489
5490#endif /* CONFIG_RFS_ACCEL */
5491
5492static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5493 __be16 port)
5494{
5495 struct bnxt *bp = netdev_priv(dev);
5496
5497 if (!netif_running(dev))
5498 return;
5499
5500 if (sa_family != AF_INET6 && sa_family != AF_INET)
5501 return;
5502
5503 if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5504 return;
5505
5506 bp->vxlan_port_cnt++;
5507 if (bp->vxlan_port_cnt == 1) {
5508 bp->vxlan_port = port;
5509 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5510 schedule_work(&bp->sp_task);
5511 }
5512}
5513
5514static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5515 __be16 port)
5516{
5517 struct bnxt *bp = netdev_priv(dev);
5518
5519 if (!netif_running(dev))
5520 return;
5521
5522 if (sa_family != AF_INET6 && sa_family != AF_INET)
5523 return;
5524
5525 if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5526 bp->vxlan_port_cnt--;
5527
5528 if (bp->vxlan_port_cnt == 0) {
5529 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5530 schedule_work(&bp->sp_task);
5531 }
5532 }
5533}
5534
5535static const struct net_device_ops bnxt_netdev_ops = {
5536 .ndo_open = bnxt_open,
5537 .ndo_start_xmit = bnxt_start_xmit,
5538 .ndo_stop = bnxt_close,
5539 .ndo_get_stats64 = bnxt_get_stats64,
5540 .ndo_set_rx_mode = bnxt_set_rx_mode,
5541 .ndo_do_ioctl = bnxt_ioctl,
5542 .ndo_validate_addr = eth_validate_addr,
5543 .ndo_set_mac_address = bnxt_change_mac_addr,
5544 .ndo_change_mtu = bnxt_change_mtu,
5545 .ndo_fix_features = bnxt_fix_features,
5546 .ndo_set_features = bnxt_set_features,
5547 .ndo_tx_timeout = bnxt_tx_timeout,
5548#ifdef CONFIG_BNXT_SRIOV
5549 .ndo_get_vf_config = bnxt_get_vf_config,
5550 .ndo_set_vf_mac = bnxt_set_vf_mac,
5551 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
5552 .ndo_set_vf_rate = bnxt_set_vf_bw,
5553 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
5554 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
5555#endif
5556#ifdef CONFIG_NET_POLL_CONTROLLER
5557 .ndo_poll_controller = bnxt_poll_controller,
5558#endif
5559 .ndo_setup_tc = bnxt_setup_tc,
5560#ifdef CONFIG_RFS_ACCEL
5561 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
5562#endif
5563 .ndo_add_vxlan_port = bnxt_add_vxlan_port,
5564 .ndo_del_vxlan_port = bnxt_del_vxlan_port,
5565#ifdef CONFIG_NET_RX_BUSY_POLL
5566 .ndo_busy_poll = bnxt_busy_poll,
5567#endif
5568};
5569
5570static void bnxt_remove_one(struct pci_dev *pdev)
5571{
5572 struct net_device *dev = pci_get_drvdata(pdev);
5573 struct bnxt *bp = netdev_priv(dev);
5574
5575 if (BNXT_PF(bp))
5576 bnxt_sriov_disable(bp);
5577
5578 unregister_netdev(dev);
5579 cancel_work_sync(&bp->sp_task);
5580 bp->sp_event = 0;
5581
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05005582 bnxt_hwrm_func_drv_unrgtr(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005583 bnxt_free_hwrm_resources(bp);
5584 pci_iounmap(pdev, bp->bar2);
5585 pci_iounmap(pdev, bp->bar1);
5586 pci_iounmap(pdev, bp->bar0);
5587 free_netdev(dev);
5588
5589 pci_release_regions(pdev);
5590 pci_disable_device(pdev);
5591}
5592
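/* Query the current link state from the firmware and seed the
 * ethtool-visible settings (autoneg, flow control, requested speed/duplex,
 * advertised speeds) from the NVM defaults.  Also appends the PHY version
 * to the firmware version string.
 */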
5593static int bnxt_probe_phy(struct bnxt *bp)
5594{
5595 int rc = 0;
5596 struct bnxt_link_info *link_info = &bp->link_info;
5597 char phy_ver[PHY_VER_STR_LEN];
5598
5599 rc = bnxt_update_link(bp, false);
5600 if (rc) {
5601 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5602 rc);
5603 return rc;
5604 }
5605
 5606	/* Initialize the ethtool settings copy with NVM settings */
5607 if (BNXT_AUTO_MODE(link_info->auto_mode))
5608 link_info->autoneg |= BNXT_AUTONEG_SPEED;
5609
5610 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5611 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5612 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5613 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5614 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5615 link_info->req_flow_ctrl = link_info->force_pause_setting;
5616 }
5617 link_info->req_duplex = link_info->duplex_setting;
5618 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5619 link_info->req_link_speed = link_info->auto_link_speed;
5620 else
5621 link_info->req_link_speed = link_info->force_link_speed;
5622 link_info->advertising = link_info->auto_link_speeds;
5623 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5624 link_info->phy_ver[0],
5625 link_info->phy_ver[1],
5626 link_info->phy_ver[2]);
5627 strcat(bp->fw_ver_str, phy_ver);
5628 return rc;
5629}
5630
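/* Return the number of MSI-X vectors the function supports, or 1 if the
 * device has no MSI-X capability.  The Table Size field in the MSI-X
 * Message Control word is encoded as N - 1, hence the "+ 1" below.
 */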
5631static int bnxt_get_max_irq(struct pci_dev *pdev)
5632{
5633 u16 ctrl;
5634
5635 if (!pdev->msix_cap)
5636 return 1;
5637
5638 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
5639 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
5640}
5641
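/* Compute the maximum usable RX and TX rings for this function.  The limits
 * come from the PF or VF resource maxima reported by the firmware and are
 * further bounded by IRQs, completion rings, statistics contexts and
 * hardware ring groups.  With aggregation rings enabled, each RX ring also
 * needs an aggregation ring, so the usable RX count is halved.
 */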
5642void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
5643{
Michael Chanb72d4a62015-12-27 18:19:27 -05005644 int max_rings = 0, max_ring_grps = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04005645
5646 if (BNXT_PF(bp)) {
Michael Chan4a21b492015-12-27 18:19:26 -05005647 *max_tx = bp->pf.max_tx_rings;
5648 *max_rx = bp->pf.max_rx_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04005649 max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5650 max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
Michael Chanb72d4a62015-12-27 18:19:27 -05005651 max_ring_grps = bp->pf.max_hw_ring_grps;
Michael Chanc0c050c2015-10-22 16:01:17 -04005652 } else {
Michael Chan379a80a2015-10-23 15:06:19 -04005653#ifdef CONFIG_BNXT_SRIOV
Michael Chanc0c050c2015-10-22 16:01:17 -04005654 *max_tx = bp->vf.max_tx_rings;
5655 *max_rx = bp->vf.max_rx_rings;
5656 max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
5657 max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
Michael Chanb72d4a62015-12-27 18:19:27 -05005658 max_ring_grps = bp->vf.max_hw_ring_grps;
Michael Chan379a80a2015-10-23 15:06:19 -04005659#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04005660 }
5661 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5662 *max_rx >>= 1;
5663
5664 *max_rx = min_t(int, *max_rx, max_rings);
Michael Chanb72d4a62015-12-27 18:19:27 -05005665 *max_rx = min_t(int, *max_rx, max_ring_grps);
Michael Chanc0c050c2015-10-22 16:01:17 -04005666 *max_tx = min_t(int, *max_tx, max_rings);
5667}
5668
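/* PCI probe callback: allocate the netdev, map the BARs, establish the HWRM
 * channel and register with the firmware, query capabilities and queue
 * configuration, size the default rings, then probe the PHY and register
 * the netdev.
 */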
5669static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5670{
5671 static int version_printed;
5672 struct net_device *dev;
5673 struct bnxt *bp;
5674 int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
5675
5676 if (version_printed++ == 0)
5677 pr_info("%s", version);
5678
5679 max_irqs = bnxt_get_max_irq(pdev);
5680 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
5681 if (!dev)
5682 return -ENOMEM;
5683
5684 bp = netdev_priv(dev);
5685
5686 if (bnxt_vf_pciid(ent->driver_data))
5687 bp->flags |= BNXT_FLAG_VF;
5688
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005689 if (pdev->msix_cap)
Michael Chanc0c050c2015-10-22 16:01:17 -04005690 bp->flags |= BNXT_FLAG_MSIX_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -04005691
5692 rc = bnxt_init_board(pdev, dev);
5693 if (rc < 0)
5694 goto init_err_free;
5695
5696 dev->netdev_ops = &bnxt_netdev_ops;
5697 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
5698 dev->ethtool_ops = &bnxt_ethtool_ops;
5699
5700 pci_set_drvdata(pdev, dev);
5701
5702 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5703 NETIF_F_TSO | NETIF_F_TSO6 |
5704 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5705 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
5706 NETIF_F_RXHASH |
5707 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
5708
Michael Chanc0c050c2015-10-22 16:01:17 -04005709 dev->hw_enc_features =
5710 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5711 NETIF_F_TSO | NETIF_F_TSO6 |
5712 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5713 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
5714 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
5715 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
5716 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
5717 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
5718 dev->priv_flags |= IFF_UNICAST_FLT;
5719
5720#ifdef CONFIG_BNXT_SRIOV
5721 init_waitqueue_head(&bp->sriov_cfg_wait);
5722#endif
5723 rc = bnxt_alloc_hwrm_resources(bp);
5724 if (rc)
5725 goto init_err;
5726
5727 mutex_init(&bp->hwrm_cmd_lock);
5728 bnxt_hwrm_ver_get(bp);
5729
5730 rc = bnxt_hwrm_func_drv_rgtr(bp);
5731 if (rc)
5732 goto init_err;
5733
5734 /* Get the MAX capabilities for this function */
5735 rc = bnxt_hwrm_func_qcaps(bp);
5736 if (rc) {
5737 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
5738 rc);
5739 rc = -1;
5740 goto init_err;
5741 }
5742
5743 rc = bnxt_hwrm_queue_qportcfg(bp);
5744 if (rc) {
5745 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
5746 rc);
5747 rc = -1;
5748 goto init_err;
5749 }
5750
5751 bnxt_set_tpa_flags(bp);
5752 bnxt_set_ring_params(bp);
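	/* Size the default rings: start from the stack's default RSS queue
	 * count and cap it by the RX/TX maxima reported by the firmware.
	 */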
5753 dflt_rings = netif_get_num_default_rss_queues();
Jeffrey Huangbdd43472015-12-02 01:54:07 -05005754 if (BNXT_PF(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04005755 bp->pf.max_irqs = max_irqs;
Michael Chan379a80a2015-10-23 15:06:19 -04005756#if defined(CONFIG_BNXT_SRIOV)
Jeffrey Huangbdd43472015-12-02 01:54:07 -05005757 else
Michael Chanc0c050c2015-10-22 16:01:17 -04005758 bp->vf.max_irqs = max_irqs;
Michael Chan379a80a2015-10-23 15:06:19 -04005759#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04005760 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5761 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
5762 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
5763 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5764 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
5765 bp->num_stat_ctxs = bp->cp_nr_rings;
5766
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005767 if (BNXT_PF(bp)) {
5768 dev->hw_features |= NETIF_F_NTUPLE;
5769 if (bnxt_rfs_capable(bp)) {
5770 bp->flags |= BNXT_FLAG_RFS;
5771 dev->features |= NETIF_F_NTUPLE;
5772 }
5773 }
5774
Michael Chanc0c050c2015-10-22 16:01:17 -04005775 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
5776 bp->flags |= BNXT_FLAG_STRIP_VLAN;
5777
5778 rc = bnxt_probe_phy(bp);
5779 if (rc)
5780 goto init_err;
5781
5782 rc = register_netdev(dev);
5783 if (rc)
5784 goto init_err;
5785
5786 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
5787 board_info[ent->driver_data].name,
5788 (long)pci_resource_start(pdev, 0), dev->dev_addr);
5789
5790 return 0;
5791
5792init_err:
5793 pci_iounmap(pdev, bp->bar0);
5794 pci_release_regions(pdev);
5795 pci_disable_device(pdev);
5796
5797init_err_free:
5798 free_netdev(dev);
5799 return rc;
5800}
5801
5802static struct pci_driver bnxt_pci_driver = {
5803 .name = DRV_MODULE_NAME,
5804 .id_table = bnxt_pci_tbl,
5805 .probe = bnxt_init_one,
5806 .remove = bnxt_remove_one,
5807#if defined(CONFIG_BNXT_SRIOV)
5808 .sriov_configure = bnxt_sriov_configure,
5809#endif
5810};
5811
5812module_pci_driver(bnxt_pci_driver);