/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

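/* Completion ring doorbell helpers.  Each write combines a doorbell key
 * with the current completion ring consumer index (RING_CMP(raw_cons)).
 * BNXT_CP_DB_REARM acks processed entries and re-enables the ring's IRQ;
 * BNXT_CP_DB acks them with the IRQ left masked (DB_IRQ_DIS), as done
 * while NAPI polling; BNXT_CP_DB_IRQ_DIS only masks the IRQ.
 */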
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

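/* TX length-hint table, indexed by packet length in 512-byte units
 * (see "length >>= 9" in bnxt_start_xmit() below).  The selected
 * TX_BD_FLAGS_LHINT_* value is merged into the BD flags, presumably to
 * let the hardware size its DMA reads (assumption; not spelled out here).
 */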
static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

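/* Main transmit routine.  A small packet (length <= bp->tx_push_thresh)
 * sent on an otherwise empty ring takes the "push" path below: the BDs and
 * the packet data are written directly into the doorbell BAR with
 * __iowrite64_copy()/__iowrite32_copy(), avoiding a DMA mapping.  All
 * other packets take the normal path: the linear data and each frag are
 * DMA-mapped into long TX BDs and the doorbell is rung with the new
 * producer index.
 */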
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

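/* TX completion handler, called from NAPI poll with the number of
 * completed packets.  It unmaps and frees each completed skb, advances
 * tx_cons, and wakes the queue (under the tx queue lock) once enough
 * descriptors are free; the smp_mb() below pairs with the barrier in
 * bnxt_start_xmit().
 */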
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

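/* Allocate a buffer page for the RX aggregation ring.  When the system
 * page size is larger than BNXT_RX_PAGE_SIZE, one page is carved into
 * BNXT_RX_PAGE_SIZE chunks handed out on successive calls, with
 * get_page() holding an extra reference while part of the page is still
 * unused.
 */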
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

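/* Copy a small received packet into a freshly allocated skb so that the
 * original DMA buffer can be recycled in place; used for packets up to
 * bp->rx_copy_thresh (cf. BNXT_RX_COPY_THRESH above).
 */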
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

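/* Finish GRO/LRO processing of an aggregated packet.  The header fixups
 * differ by chip family, so bp->gro_func points at bnxt_gro_func_5730x()
 * or bnxt_gro_func_5731x() as appropriate (set up elsewhere in the
 * driver).
 */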
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			skb_mark_napi_id(skb, &bnapi->napi);
			if (bnxt_busy_polling(bnapi))
				netif_receive_skb(skb);
			else
				napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
	if (bnxt_busy_polling(bnapi))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

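/* Firmware async event handling.  Each recognized event sets a bit in
 * bp->sp_event and kicks bp->sp_task, so the actual work (link re-query,
 * reset, etc.) runs later in process context.
 */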
Michael Chan4bb13ab2016-04-05 14:09:01 -04001479#define BNXT_GET_EVENT_PORT(data) \
Michael Chan87c374d2016-12-02 21:17:16 -05001480 ((data) & \
1481 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
Michael Chan4bb13ab2016-04-05 14:09:01 -04001482
Michael Chanc0c050c2015-10-22 16:01:17 -04001483static int bnxt_async_event_process(struct bnxt *bp,
1484 struct hwrm_async_event_cmpl *cmpl)
1485{
1486 u16 event_id = le16_to_cpu(cmpl->event_id);
1487
1488 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1489 switch (event_id) {
Michael Chan87c374d2016-12-02 21:17:16 -05001490 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
Michael Chan8cbde112016-04-11 04:11:14 -04001491 u32 data1 = le32_to_cpu(cmpl->event_data1);
1492 struct bnxt_link_info *link_info = &bp->link_info;
1493
1494 if (BNXT_VF(bp))
1495 goto async_event_process_exit;
1496 if (data1 & 0x20000) {
1497 u16 fw_speed = link_info->force_link_speed;
1498 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1499
1500 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1501 speed);
1502 }
Michael Chan286ef9d2016-11-16 21:13:08 -05001503 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
Michael Chan8cbde112016-04-11 04:11:14 -04001504 /* fall thru */
1505 }
Michael Chan87c374d2016-12-02 21:17:16 -05001506 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
Michael Chanc0c050c2015-10-22 16:01:17 -04001507 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
Jeffrey Huang19241362016-02-26 04:00:00 -05001508 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001509 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
Jeffrey Huang19241362016-02-26 04:00:00 -05001510 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001511 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001512 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
Michael Chan4bb13ab2016-04-05 14:09:01 -04001513 u32 data1 = le32_to_cpu(cmpl->event_data1);
1514 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1515
1516 if (BNXT_VF(bp))
1517 break;
1518
1519 if (bp->pf.port_id != port_id)
1520 break;
1521
Michael Chan4bb13ab2016-04-05 14:09:01 -04001522 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1523 break;
1524 }
Michael Chan87c374d2016-12-02 21:17:16 -05001525 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
Michael Chanfc0f1922016-06-13 02:25:30 -04001526 if (BNXT_PF(bp))
1527 goto async_event_process_exit;
1528 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1529 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001530 default:
1531 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1532 event_id);
Jeffrey Huang19241362016-02-26 04:00:00 -05001533 goto async_event_process_exit;
Michael Chanc0c050c2015-10-22 16:01:17 -04001534 }
Jeffrey Huang19241362016-02-26 04:00:00 -05001535 schedule_work(&bp->sp_task);
1536async_event_process_exit:
Michael Chanc0c050c2015-10-22 16:01:17 -04001537 return 0;
1538}
1539
1540static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1541{
1542 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1543 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1544 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1545 (struct hwrm_fwd_req_cmpl *)txcmp;
1546
1547 switch (cmpl_type) {
1548 case CMPL_BASE_TYPE_HWRM_DONE:
1549 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1550 if (seq_id == bp->hwrm_intr_seq_id)
1551 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1552 else
1553 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1554 break;
1555
1556 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1557 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1558
1559 if ((vf_id < bp->pf.first_vf_id) ||
1560 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1561 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1562 vf_id);
1563 return -EINVAL;
1564 }
1565
1566 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1567 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1568 schedule_work(&bp->sp_task);
1569 break;
1570
1571 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1572 bnxt_async_event_process(bp,
1573 (struct hwrm_async_event_cmpl *)txcmp);
1574
1575 default:
1576 break;
1577 }
1578
1579 return 0;
1580}
1581
1582static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1583{
1584 struct bnxt_napi *bnapi = dev_instance;
1585 struct bnxt *bp = bnapi->bp;
1586 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1587 u32 cons = RING_CMP(cpr->cp_raw_cons);
1588
1589 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1590 napi_schedule(&bnapi->napi);
1591 return IRQ_HANDLED;
1592}
1593
1594static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1595{
1596 u32 raw_cons = cpr->cp_raw_cons;
1597 u16 cons = RING_CMP(raw_cons);
1598 struct tx_cmp *txcmp;
1599
1600 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1601
1602 return TX_CMP_VALID(txcmp, raw_cons);
1603}
1604
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

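/* Main completion ring processing loop.  Walks the ring until the budget
 * is spent or no valid entries remain, tallying TX completions, RX packets
 * and HWRM completions, then acknowledges the completion ring and rings
 * the RX/aggregation doorbells.  Returns the number of RX packets handled.
 */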
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	bool rx_event = false;
	bool agg_event = false;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			rx_event = true;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnxt_tx_int(bp, bnapi, tx_pkts);

	if (rx_event) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		if (agg_event) {
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}
	return rx_pkts;
}

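/* NAPI poll handler for the Nitro A0 special completion ring.  Each RX
 * completion is stamped with a CRC error before bnxt_rx_pkt() sees it, so
 * the buffer is dropped and recycled (rc == -EIO) rather than passed up
 * the stack.
 */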
static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	bool agg_event = false;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc == -EIO))
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);

	if (agg_event) {
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
	}

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete(napi);
		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

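/* Standard NAPI poll handler: process completions until the budget is
 * exhausted, then complete NAPI and re-arm the completion ring doorbell
 * once no work remains.
 */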
static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (!bnxt_lock_napi(bnapi))
		return budget;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			napi_complete(napi);
			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
			break;
		}
	}
	mmiowb();
	bnxt_unlock_napi(bnapi);
	return work_done;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
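/* Busy-poll entry point.  Processes a small fixed budget of completions
 * directly in process context; fails fast when interrupts are disabled,
 * the link is down, or NAPI already owns the ring.
 */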
static int bnxt_busy_poll(struct napi_struct *napi)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int rx_work, budget = 4;

	if (atomic_read(&bp->intr_sem) != 0)
		return LL_FLUSH_FAILED;

	if (!bp->link_info.link_up)
		return LL_FLUSH_FAILED;

	if (!bnxt_lock_poll(bnapi))
		return LL_FLUSH_BUSY;

	rx_work = bnxt_poll_work(bp, bnapi, budget);

	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);

	bnxt_unlock_poll(bnapi);
	return rx_work;
}
#endif

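/* Free any skbs still queued on the TX rings, unmapping the linear part
 * and all fragments.  Buffers that were sent via the TX push mechanism
 * have no DMA mapping of their own and are simply freed.
 */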
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single(
					&pdev->dev,
					dma_unmap_addr(tpa_info, mapping),
					bp->rx_buf_use_size,
					PCI_DMA_FROMDEVICE);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(rx_agg_buf, mapping),
				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}

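/* Allocate the DMA-coherent descriptor pages for one ring, an indirection
 * table of page addresses when the ring spans more than one page, and an
 * optional vzalloc'ed shadow area for software state.
 */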
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (ring->nr_pages > 1) {
		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  ring->nr_pages * 8,
						  &ring->pg_tbl_map,
						  GFP_KERNEL);
		if (!ring->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < ring->nr_pages; i++) {
		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     ring->page_size,
						     &ring->dma_arr[i],
						     GFP_KERNEL);
		if (!ring->pg_arr[i])
			return -ENOMEM;

		if (ring->nr_pages > 1)
			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
	}

	if (ring->vmem_size) {
		*ring->vmem = vzalloc(ring->vmem_size);
		if (!(*ring->vmem))
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, ring);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, ring);
			if (rc)
				return rc;

			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

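/* Allocate the TX descriptor rings.  When the push threshold is small
 * enough, also allocate one coherent buffer per ring that backs the TX
 * push operation (the BD template followed by the packet data).  Each
 * ring is tagged with the hardware queue id of its traffic class.
 */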
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					   bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;
	}
	return 0;
}

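/* Wire up the generic bnxt_ring_struct descriptors for the completion,
 * RX, RX aggregation and TX rings so that bnxt_alloc_ring() knows the
 * page counts, page sizes and shadow vmem areas to allocate.
 */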
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		ring->nr_pages = bp->cp_nr_pages;
		ring->page_size = HW_CMPD_RING_SIZE;
		ring->pg_arr = (void **)cpr->cp_desc_ring;
		ring->dma_arr = cpr->cp_desc_mapping;
		ring->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		ring->nr_pages = bp->rx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_desc_ring;
		ring->dma_arr = rxr->rx_desc_mapping;
		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		ring->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		ring->nr_pages = bp->rx_agg_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
		ring->dma_arr = rxr->rx_agg_desc_mapping;
		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		ring->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		ring->nr_pages = bp->tx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)txr->tx_desc_ring;
		ring->dma_arr = txr->tx_desc_mapping;
		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		ring->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

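/* Initialize one RX ring: stamp the buffer descriptors, fill the ring
 * (and the aggregation ring, if enabled) with fresh buffers, and
 * pre-allocate the TPA buffers used for LRO/GRO completions.
 */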
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

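/* Compute how many descriptor pages a ring of @ring_size entries needs,
 * rounded up to a power of two so that the ring masks work: e.g. 2047
 * entries at 512 descriptors per page yields 4 pages.
 */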
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}

static void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	if (bp->dev->features & NETIF_F_GRO)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}

static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}

static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
			  bp->hwrm_cmd_resp_dma_addr);

	bp->hwrm_cmd_resp_addr = NULL;
	if (bp->hwrm_dbg_resp_addr) {
		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
				  bp->hwrm_dbg_resp_addr,
				  bp->hwrm_dbg_resp_dma_addr);

		bp->hwrm_dbg_resp_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						   &bp->hwrm_cmd_resp_dma_addr,
						   GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;
	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
						    HWRM_DBG_REG_BUF_SIZE,
						    &bp->hwrm_dbg_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_dbg_resp_addr)
		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");

	return 0;
}

static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
		bp->flags &= ~BNXT_FLAG_PORT_STATS;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}

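/* Allocate one DMA-coherent hardware stats context per completion ring.
 * On the PF (chips other than the 58700) also allocate a single buffer
 * for the firmware RX/TX port statistics; the TX block follows the RX
 * block with a 512-byte pad.
 */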
static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* Called under rtnl_lock with all our NAPIs disabled.  It is
	 * safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	/* BITS_TO_LONGS() counts longs, not bytes; scale by sizeof(long)
	 * so the whole bitmap is allocated.
	 */
	bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR) *
				    sizeof(long),
				    GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

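/* Allocate all driver memory in dependency order: the bnxt_napi array,
 * the RX/TX ring info arrays (TX rings share NAPI instances with RX rings
 * when BNXT_FLAG_SHARED_RINGS is set), stats, ntuple filters and vnics,
 * and then the descriptor rings and vnic attributes themselves.
 */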
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			bp->rx_ring[i].bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}

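/* Initialize the common HWRM request header: request type, completion
 * ring, target function, and the DMA address the firmware should write
 * the response to.
 */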
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}

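/* Send a single HWRM message through the BAR0 channel and wait for the
 * firmware response.  If the request names a completion ring, the wait
 * is for the HWRM_DONE completion; otherwise the response length and
 * then its valid word are polled directly in the response buffer.
 * Only one message may be outstanding at a time.
 */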
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	tmo_count = timeout * 40;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			usleep_range(25, 40);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(25, 40);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < 5; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			udelay(1);
		}

		if (i >= 5) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}

int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

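/* Register the driver with the firmware: OS type, driver version and the
 * async events we want forwarded.  The PF additionally registers the
 * bitmap of VF HWRM commands that the firmware should forward to it for
 * processing.
 */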
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	int i;
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		DECLARE_BITMAP(vf_req_snif_bmap, 256);
		u32 *data = (u32 *)vf_req_snif_bmap;

		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

3403 /* Number of segs are log2 units, and first packet is not
3404 * included as part of this units.
3405 */
Michael Chan2839f282016-04-25 02:30:50 -04003406 if (mss <= BNXT_RX_PAGE_SIZE) {
3407 n = BNXT_RX_PAGE_SIZE / mss;
Michael Chanc0c050c2015-10-22 16:01:17 -04003408 nsegs = (MAX_SKB_FRAGS - 1) * n;
3409 } else {
Michael Chan2839f282016-04-25 02:30:50 -04003410 n = mss / BNXT_RX_PAGE_SIZE;
3411 if (mss & (BNXT_RX_PAGE_SIZE - 1))
Michael Chanc0c050c2015-10-22 16:01:17 -04003412 n++;
3413 nsegs = (MAX_SKB_FRAGS - n) / n;
3414 }
3415
3416 segs = ilog2(nsegs);
3417 req.max_agg_segs = cpu_to_le16(segs);
3418 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
Michael Chanc1935542015-12-27 18:19:28 -05003419
3420 req.min_agg_len = cpu_to_le32(512);
Michael Chanc0c050c2015-10-22 16:01:17 -04003421 }
3422 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3423
3424 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3425}
3426
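/* Fill the RSS indirection table with ring group IDs and program the
 * hash type and hash key for the VNIC's RSS context when @set_rss is
 * true.
 */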
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

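/* Enable jumbo placement and IPv4/IPv6 header-data-split (HDS) RX
 * buffer placement modes on the VNIC.
 */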
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

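/* Configure the default ring group, RSS/COS rules, MRU, and VLAN
 * stripping for a VNIC that has already been allocated.
 */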
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

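/* Allocate a VNIC in firmware and map @nr_rings ring groups, starting
 * at RX ring @start_rx_ring_idx, to it.
 */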
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

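/* Send a HWRM_RING_ALLOC request for one completion, TX, RX, or
 * aggregation ring and record the firmware ring ID on success.
 */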
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

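/* Allocate all completion, TX, RX, and aggregation rings in firmware
 * and set up their doorbell registers.
 */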
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}

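/* Free all TX, RX, aggregation, and completion rings in firmware.
 * When @close_path is true, each request carries the ring's associated
 * completion ring ID.
 */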
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 grp_idx = txr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

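/* Fill one coalescing request.  The low 16 bits of @max_bufs and
 * @buf_tmrs apply outside interrupt processing; the high 16 bits apply
 * during interrupt processing.
 */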
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
	u32 buf_tmrs, u16 flags,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = cpu_to_le16(flags);
	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}

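/* Build RX and TX coalescing requests, then program one of them on
 * each completion ring: the RX parameters for rings with an RX ring,
 * the TX parameters otherwise.
 */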
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	u16 max_buf, max_buf_irq;
	u16 buf_tmr, buf_tmr_irq;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(u16, max_buf, 1, 63);
	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency. Enable it only
	 * if coal_ticks is less than 25 us.
	 */
	if (bp->rx_coal_ticks < 25)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

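/* Query function capabilities and resource maxima (rings, VNICs,
 * contexts) from firmware and cache them in the PF or VF info
 * structure; for a VF, also set up the netdev MAC address.
 */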
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	bp->tx_push_thresh = 0;
	if (resp->flags &
	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);

		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
		mutex_unlock(&bp->hwrm_cmd_lock);

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			random_ether_addr(bp->dev->dev_addr);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
		return rc;
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

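/* Query firmware and HWRM interface versions, cache the command
 * timeout, maximum request length, and chip number, and warn if the
 * HWRM interface predates the 1.0.0 spec.
 */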
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
	if (resp->hwrm_intf_maj < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
			    resp->hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj >= 1)
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);

	bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
#if IS_ENABLED(CONFIG_RTC_LIB)
	struct hwrm_fw_set_time_input req = {0};
	struct rtc_time tm;
	struct timeval tv;

	if (bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	do_gettimeofday(&tv);
	rtc_time_to_tm(tv.tv_sec, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   rc, i);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the VNIC contexts */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* before freeing the VNICs, undo the TPA settings */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

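/* Bring up one VNIC: allocate its RSS (and, on Nitro A0, COS) contexts,
 * configure it, enable RSS hashing, and set HDS if aggregation rings
 * are in use.
 */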
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	int rc;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

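/* Allocate and configure one additional VNIC per RX ring for RFS flow
 * steering; VNIC i + 1 maps to RX ring i.
 */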
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}

static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	unsigned int rc = 0;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}

static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

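/* Program the chip for operation: allocate stats contexts, rings, ring
 * groups, and VNICs in firmware, then set up filters, RX mode, and
 * interrupt coalescing.
 */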
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}

static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}

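/* Trim *rx and *tx so the rings fit within @max vectors.  With shared
 * rings each count is simply capped at @max; otherwise the larger
 * count is reduced until rx + tx <= max.
 */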
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}

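/* Enable MSI-X, trim the ring counts to the vectors granted, name each
 * vector after its queue, and build the IRQ table.
 */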
Michael Chanc0c050c2015-10-22 16:01:17 -04004746static int bnxt_setup_msix(struct bnxt *bp)
4747{
4748 struct msix_entry *msix_ent;
4749 struct net_device *dev = bp->dev;
Michael Chan01657bc2016-01-02 23:45:03 -05004750 int i, total_vecs, rc = 0, min = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04004751 const int len = sizeof(bp->irq_tbl[0].name);
4752
4753 bp->flags &= ~BNXT_FLAG_USING_MSIX;
4754 total_vecs = bp->cp_nr_rings;
4755
4756 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4757 if (!msix_ent)
4758 return -ENOMEM;
4759
4760 for (i = 0; i < total_vecs; i++) {
4761 msix_ent[i].entry = i;
4762 msix_ent[i].vector = 0;
4763 }
4764
Michael Chan01657bc2016-01-02 23:45:03 -05004765 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4766 min = 2;
4767
4768 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
Michael Chanc0c050c2015-10-22 16:01:17 -04004769 if (total_vecs < 0) {
4770 rc = -ENODEV;
4771 goto msix_setup_exit;
4772 }
4773
4774 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4775 if (bp->irq_tbl) {
4776 int tcs;
4777
4778 /* Trim rings based upon num of vectors allocated */
Michael Chan6e6c5a52016-01-02 23:45:02 -05004779 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
Michael Chan01657bc2016-01-02 23:45:03 -05004780 total_vecs, min == 1);
Michael Chan6e6c5a52016-01-02 23:45:02 -05004781 if (rc)
4782 goto msix_setup_exit;
4783
Michael Chanc0c050c2015-10-22 16:01:17 -04004784 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4785 tcs = netdev_get_num_tc(dev);
4786 if (tcs > 1) {
4787 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4788 if (bp->tx_nr_rings_per_tc == 0) {
4789 netdev_reset_tc(dev);
4790 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4791 } else {
4792 int i, off, count;
4793
4794 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4795 for (i = 0; i < tcs; i++) {
4796 count = bp->tx_nr_rings_per_tc;
4797 off = i * count;
4798 netdev_set_tc_queue(dev, i, count, off);
4799 }
4800 }
4801 }
Michael Chan01657bc2016-01-02 23:45:03 -05004802 bp->cp_nr_rings = total_vecs;
Michael Chanc0c050c2015-10-22 16:01:17 -04004803
4804 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chan01657bc2016-01-02 23:45:03 -05004805 char *attr;
4806
Michael Chanc0c050c2015-10-22 16:01:17 -04004807 bp->irq_tbl[i].vector = msix_ent[i].vector;
Michael Chan01657bc2016-01-02 23:45:03 -05004808 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4809 attr = "TxRx";
4810 else if (i < bp->rx_nr_rings)
4811 attr = "rx";
4812 else
4813 attr = "tx";
4814
Michael Chanc0c050c2015-10-22 16:01:17 -04004815 snprintf(bp->irq_tbl[i].name, len,
Michael Chan01657bc2016-01-02 23:45:03 -05004816 "%s-%s-%d", dev->name, attr, i);
Michael Chanc0c050c2015-10-22 16:01:17 -04004817 bp->irq_tbl[i].handler = bnxt_msix;
4818 }
4819 rc = bnxt_set_real_num_queues(bp);
4820 if (rc)
4821 goto msix_setup_exit;
4822 } else {
4823 rc = -ENOMEM;
4824 goto msix_setup_exit;
4825 }
4826 bp->flags |= BNXT_FLAG_USING_MSIX;
4827 kfree(msix_ent);
4828 return 0;
4829
4830msix_setup_exit:
4831 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4832 pci_disable_msix(bp->pdev);
4833 kfree(msix_ent);
4834 return rc;
4835}
4836
4837static int bnxt_setup_inta(struct bnxt *bp)
4838{
4839 int rc;
4840 const int len = sizeof(bp->irq_tbl[0].name);
4841
4842 if (netdev_get_num_tc(bp->dev))
4843 netdev_reset_tc(bp->dev);
4844
4845 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4846 if (!bp->irq_tbl) {
4847 rc = -ENOMEM;
4848 return rc;
4849 }
4850 bp->rx_nr_rings = 1;
4851 bp->tx_nr_rings = 1;
4852 bp->cp_nr_rings = 1;
4853 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
Michael Chan01657bc2016-01-02 23:45:03 -05004854 bp->flags |= BNXT_FLAG_SHARED_RINGS;
Michael Chanc0c050c2015-10-22 16:01:17 -04004855 bp->irq_tbl[0].vector = bp->pdev->irq;
4856 snprintf(bp->irq_tbl[0].name, len,
4857 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4858 bp->irq_tbl[0].handler = bnxt_inta;
4859 rc = bnxt_set_real_num_queues(bp);
4860 return rc;
4861}
4862
4863static int bnxt_setup_int_mode(struct bnxt *bp)
4864{
4865 int rc = 0;
4866
4867 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4868 rc = bnxt_setup_msix(bp);
4869
Michael Chan1fa72e22016-04-25 02:30:49 -04004870 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004871 /* fallback to INTA */
4872 rc = bnxt_setup_inta(bp);
4873 }
4874 return rc;
4875}
4876
4877static void bnxt_free_irq(struct bnxt *bp)
4878{
4879 struct bnxt_irq *irq;
4880 int i;
4881
4882#ifdef CONFIG_RFS_ACCEL
4883 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4884 bp->dev->rx_cpu_rmap = NULL;
4885#endif
4886 if (!bp->irq_tbl)
4887 return;
4888
4889 for (i = 0; i < bp->cp_nr_rings; i++) {
4890 irq = &bp->irq_tbl[i];
4891 if (irq->requested)
4892 free_irq(irq->vector, bp->bnapi[i]);
4893 irq->requested = 0;
4894 }
4895 if (bp->flags & BNXT_FLAG_USING_MSIX)
4896 pci_disable_msix(bp->pdev);
4897 kfree(bp->irq_tbl);
4898 bp->irq_tbl = NULL;
4899}
4900
4901static int bnxt_request_irq(struct bnxt *bp)
4902{
Michael Chanb81a90d2016-01-02 23:45:01 -05004903 int i, j, rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004904 unsigned long flags = 0;
4905#ifdef CONFIG_RFS_ACCEL
4906 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4907#endif
4908
4909 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4910 flags = IRQF_SHARED;
4911
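 /* RX-capable vectors are also added to the CPU rmap so aRFS can steer
 * flows to the CPU that owns the ring; j counts RX rings only.
 */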
Michael Chanb81a90d2016-01-02 23:45:01 -05004912 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004913 struct bnxt_irq *irq = &bp->irq_tbl[i];
4914#ifdef CONFIG_RFS_ACCEL
Michael Chanb81a90d2016-01-02 23:45:01 -05004915 if (rmap && bp->bnapi[i]->rx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004916 rc = irq_cpu_rmap_add(rmap, irq->vector);
4917 if (rc)
4918 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05004919 j);
4920 j++;
Michael Chanc0c050c2015-10-22 16:01:17 -04004921 }
4922#endif
4923 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4924 bp->bnapi[i]);
4925 if (rc)
4926 break;
4927
4928 irq->requested = 1;
4929 }
4930 return rc;
4931}
4932
4933static void bnxt_del_napi(struct bnxt *bp)
4934{
4935 int i;
4936
4937 if (!bp->bnapi)
4938 return;
4939
4940 for (i = 0; i < bp->cp_nr_rings; i++) {
4941 struct bnxt_napi *bnapi = bp->bnapi[i];
4942
4943 napi_hash_del(&bnapi->napi);
4944 netif_napi_del(&bnapi->napi);
4945 }
Eric Dumazete5f6f562016-11-16 06:31:52 -08004946 /* We called napi_hash_del() before netif_napi_del(), so we need
4947 * to respect an RCU grace period before freeing napi structures.
4948 */
4949 synchronize_net();
Michael Chanc0c050c2015-10-22 16:01:17 -04004950}
4951
4952static void bnxt_init_napi(struct bnxt *bp)
4953{
4954 int i;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04004955 unsigned int cp_nr_rings = bp->cp_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004956 struct bnxt_napi *bnapi;
4957
4958 if (bp->flags & BNXT_FLAG_USING_MSIX) {
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04004959 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4960 cp_nr_rings--;
4961 for (i = 0; i < cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004962 bnapi = bp->bnapi[i];
4963 netif_napi_add(bp->dev, &bnapi->napi,
4964 bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04004965 }
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04004966 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4967 bnapi = bp->bnapi[cp_nr_rings];
4968 netif_napi_add(bp->dev, &bnapi->napi,
4969 bnxt_poll_nitroa0, 64);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04004970 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004971 } else {
4972 bnapi = bp->bnapi[0];
4973 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04004974 }
4975}
4976
4977static void bnxt_disable_napi(struct bnxt *bp)
4978{
4979 int i;
4980
4981 if (!bp->bnapi)
4982 return;
4983
4984 for (i = 0; i < bp->cp_nr_rings; i++) {
4985 napi_disable(&bp->bnapi[i]->napi);
4986 bnxt_disable_poll(bp->bnapi[i]);
4987 }
4988}
4989
4990static void bnxt_enable_napi(struct bnxt *bp)
4991{
4992 int i;
4993
4994 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chanfa7e2812016-05-10 19:18:00 -04004995 bp->bnapi[i]->in_reset = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04004996 bnxt_enable_poll(bp->bnapi[i]);
4997 napi_enable(&bp->bnapi[i]->napi);
4998 }
4999}
5000
Michael Chan7df4ae92016-12-02 21:17:17 -05005001void bnxt_tx_disable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005002{
5003 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005004 struct bnxt_tx_ring_info *txr;
5005 struct netdev_queue *txq;
5006
Michael Chanb6ab4b02016-01-02 23:44:59 -05005007 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005008 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005009 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005010 txq = netdev_get_tx_queue(bp->dev, i);
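 /* Marking the ring CLOSING keeps the TX completion path from
 * re-waking the queue once netif_tx_disable() runs below.
 */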
Michael Chanc0c050c2015-10-22 16:01:17 -04005011 txr->dev_state = BNXT_DEV_STATE_CLOSING;
Michael Chanc0c050c2015-10-22 16:01:17 -04005012 }
5013 }
5014 /* Stop all TX queues */
5015 netif_tx_disable(bp->dev);
5016 netif_carrier_off(bp->dev);
5017}
5018
Michael Chan7df4ae92016-12-02 21:17:17 -05005019void bnxt_tx_enable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005020{
5021 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005022 struct bnxt_tx_ring_info *txr;
5023 struct netdev_queue *txq;
5024
5025 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005026 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005027 txq = netdev_get_tx_queue(bp->dev, i);
5028 txr->dev_state = 0;
5029 }
5030 netif_tx_wake_all_queues(bp->dev);
5031 if (bp->link_info.link_up)
5032 netif_carrier_on(bp->dev);
5033}
5034
5035static void bnxt_report_link(struct bnxt *bp)
5036{
5037 if (bp->link_info.link_up) {
5038 const char *duplex;
5039 const char *flow_ctrl;
5040 u16 speed;
5041
5042 netif_carrier_on(bp->dev);
5043 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5044 duplex = "full";
5045 else
5046 duplex = "half";
5047 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5048 flow_ctrl = "ON - receive & transmit";
5049 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5050 flow_ctrl = "ON - transmit";
5051 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5052 flow_ctrl = "ON - receive";
5053 else
5054 flow_ctrl = "none";
5055 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5056 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
5057 speed, duplex, flow_ctrl);
Michael Chan170ce012016-04-05 14:08:57 -04005058 if (bp->flags & BNXT_FLAG_EEE_CAP)
5059 netdev_info(bp->dev, "EEE is %s\n",
5060 bp->eee.eee_active ? "active" :
5061 "not active");
Michael Chanc0c050c2015-10-22 16:01:17 -04005062 } else {
5063 netif_carrier_off(bp->dev);
5064 netdev_err(bp->dev, "NIC Link is Down\n");
5065 }
5066}
5067
Michael Chan170ce012016-04-05 14:08:57 -04005068static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5069{
5070 int rc = 0;
5071 struct hwrm_port_phy_qcaps_input req = {0};
5072 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan93ed8112016-06-13 02:25:37 -04005073 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chan170ce012016-04-05 14:08:57 -04005074
5075 if (bp->hwrm_spec_code < 0x10201)
5076 return 0;
5077
5078 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5079
5080 mutex_lock(&bp->hwrm_cmd_lock);
5081 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5082 if (rc)
5083 goto hwrm_phy_qcaps_exit;
5084
5085 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5086 struct ethtool_eee *eee = &bp->eee;
5087 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5088
5089 bp->flags |= BNXT_FLAG_EEE_CAP;
5090 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5091 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5092 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5093 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5094 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5095 }
Michael Chan93ed8112016-06-13 02:25:37 -04005096 link_info->support_auto_speeds =
5097 le16_to_cpu(resp->supported_speeds_auto_mode);
Michael Chan170ce012016-04-05 14:08:57 -04005098
5099hwrm_phy_qcaps_exit:
5100 mutex_unlock(&bp->hwrm_cmd_lock);
5101 return rc;
5102}
5103
Michael Chanc0c050c2015-10-22 16:01:17 -04005104static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5105{
5106 int rc = 0;
5107 struct bnxt_link_info *link_info = &bp->link_info;
5108 struct hwrm_port_phy_qcfg_input req = {0};
5109 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5110 u8 link_up = link_info->link_up;
Michael Chan286ef9d2016-11-16 21:13:08 -05005111 u16 diff;
Michael Chanc0c050c2015-10-22 16:01:17 -04005112
5113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5114
5115 mutex_lock(&bp->hwrm_cmd_lock);
5116 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5117 if (rc) {
5118 mutex_unlock(&bp->hwrm_cmd_lock);
5119 return rc;
5120 }
5121
5122 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5123 link_info->phy_link_status = resp->link;
5124 link_info->duplex = resp->duplex;
5125 link_info->pause = resp->pause;
5126 link_info->auto_mode = resp->auto_mode;
5127 link_info->auto_pause_setting = resp->auto_pause;
Michael Chan32773602016-03-07 15:38:42 -05005128 link_info->lp_pause = resp->link_partner_adv_pause;
Michael Chanc0c050c2015-10-22 16:01:17 -04005129 link_info->force_pause_setting = resp->force_pause;
Michael Chanc1935542015-12-27 18:19:28 -05005130 link_info->duplex_setting = resp->duplex;
Michael Chanc0c050c2015-10-22 16:01:17 -04005131 if (link_info->phy_link_status == BNXT_LINK_LINK)
5132 link_info->link_speed = le16_to_cpu(resp->link_speed);
5133 else
5134 link_info->link_speed = 0;
5135 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
Michael Chanc0c050c2015-10-22 16:01:17 -04005136 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5137 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
Michael Chan32773602016-03-07 15:38:42 -05005138 link_info->lp_auto_link_speeds =
5139 le16_to_cpu(resp->link_partner_adv_speeds);
Michael Chanc0c050c2015-10-22 16:01:17 -04005140 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5141 link_info->phy_ver[0] = resp->phy_maj;
5142 link_info->phy_ver[1] = resp->phy_min;
5143 link_info->phy_ver[2] = resp->phy_bld;
5144 link_info->media_type = resp->media_type;
Michael Chan03efbec2016-04-11 04:11:11 -04005145 link_info->phy_type = resp->phy_type;
Michael Chan11f15ed2016-04-05 14:08:55 -04005146 link_info->transceiver = resp->xcvr_pkg_type;
Michael Chan170ce012016-04-05 14:08:57 -04005147 link_info->phy_addr = resp->eee_config_phy_addr &
5148 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04005149 link_info->module_status = resp->module_status;
Michael Chanc0c050c2015-10-22 16:01:17 -04005150
Michael Chan170ce012016-04-05 14:08:57 -04005151 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5152 struct ethtool_eee *eee = &bp->eee;
5153 u16 fw_speeds;
5154
5155 eee->eee_active = 0;
5156 if (resp->eee_config_phy_addr &
5157 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5158 eee->eee_active = 1;
5159 fw_speeds = le16_to_cpu(
5160 resp->link_partner_adv_eee_link_speed_mask);
5161 eee->lp_advertised =
5162 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5163 }
5164
5165 /* Pull initial EEE config */
5166 if (!chng_link_state) {
5167 if (resp->eee_config_phy_addr &
5168 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5169 eee->eee_enabled = 1;
5170
5171 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5172 eee->advertised =
5173 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5174
5175 if (resp->eee_config_phy_addr &
5176 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5177 __le32 tmr;
5178
5179 eee->tx_lpi_enabled = 1;
5180 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5181 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5182 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5183 }
5184 }
5185 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005186 /* TODO: need to add more logic to report VF link */
5187 if (chng_link_state) {
5188 if (link_info->phy_link_status == BNXT_LINK_LINK)
5189 link_info->link_up = 1;
5190 else
5191 link_info->link_up = 0;
5192 if (link_up != link_info->link_up)
5193 bnxt_report_link(bp);
5194 } else {
5195 /* always link down if not required to update link state */
5196 link_info->link_up = 0;
5197 }
5198 mutex_unlock(&bp->hwrm_cmd_lock);
Michael Chan286ef9d2016-11-16 21:13:08 -05005199
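 /* (support | diff) differs from support exactly when 'advertising'
 * still carries a speed bit the PHY no longer supports.
 */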
5200 diff = link_info->support_auto_speeds ^ link_info->advertising;
5201 if ((link_info->support_auto_speeds | diff) !=
5202 link_info->support_auto_speeds) {
5203 /* An advertised speed is no longer supported, so we need to
5204 * update the advertisement settings. See bnxt_reset() for
5205 * comments about the rtnl_lock() sequence below.
5206 */
5207 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5208 rtnl_lock();
5209 link_info->advertising = link_info->support_auto_speeds;
5210 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
5211 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5212 bnxt_hwrm_set_link_setting(bp, true, false);
5213 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5214 rtnl_unlock();
5215 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005216 return 0;
5217}
5218
Michael Chan10289be2016-05-15 03:04:49 -04005219static void bnxt_get_port_module_status(struct bnxt *bp)
5220{
5221 struct bnxt_link_info *link_info = &bp->link_info;
5222 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5223 u8 module_status;
5224
5225 if (bnxt_update_link(bp, true))
5226 return;
5227
5228 module_status = link_info->module_status;
5229 switch (module_status) {
5230 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5231 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5232 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5233 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5234 bp->pf.port_id);
5235 if (bp->hwrm_spec_code >= 0x10201) {
5236 netdev_warn(bp->dev, "Module part number %s\n",
5237 resp->phy_vendor_partnumber);
5238 }
5239 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5240 netdev_warn(bp->dev, "TX is disabled\n");
5241 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5242 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5243 }
5244}
5245
Michael Chanc0c050c2015-10-22 16:01:17 -04005246static void
5247bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5248{
5249 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04005250 if (bp->hwrm_spec_code >= 0x10201)
5251 req->auto_pause =
5252 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04005253 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5254 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5255 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04005256 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04005257 req->enables |=
5258 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5259 } else {
5260 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5261 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5262 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5263 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5264 req->enables |=
5265 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04005266 if (bp->hwrm_spec_code >= 0x10201) {
5267 req->auto_pause = req->force_pause;
5268 req->enables |= cpu_to_le32(
5269 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5270 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005271 }
5272}
5273
5274static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5275 struct hwrm_port_phy_cfg_input *req)
5276{
5277 u8 autoneg = bp->link_info.autoneg;
5278 u16 fw_link_speed = bp->link_info.req_link_speed;
5279 u32 advertising = bp->link_info.advertising;
5280
5281 if (autoneg & BNXT_AUTONEG_SPEED) {
5282 req->auto_mode |=
Michael Chan11f15ed2016-04-05 14:08:55 -04005283 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04005284
5285 req->enables |= cpu_to_le32(
5286 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5287 req->auto_link_speed_mask = cpu_to_le16(advertising);
5288
5289 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5290 req->flags |=
5291 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5292 } else {
5293 req->force_link_speed = cpu_to_le16(fw_link_speed);
5294 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5295 }
5296
Michael Chanc0c050c2015-10-22 16:01:17 -04005297 /* tell the ChiMP firmware that the setting takes effect immediately */
5298 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5299}
5300
5301int bnxt_hwrm_set_pause(struct bnxt *bp)
5302{
5303 struct hwrm_port_phy_cfg_input req = {0};
5304 int rc;
5305
5306 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5307 bnxt_hwrm_set_pause_common(bp, &req);
5308
5309 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5310 bp->link_info.force_link_chng)
5311 bnxt_hwrm_set_link_common(bp, &req);
5312
5313 mutex_lock(&bp->hwrm_cmd_lock);
5314 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5315 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5316 /* since changing the pause setting doesn't trigger any link
5317 * change event, the driver needs to update the current pause
5318 * result upon successful return of the phy_cfg command
5319 */
5320 bp->link_info.pause =
5321 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5322 bp->link_info.auto_pause_setting = 0;
5323 if (!bp->link_info.force_link_chng)
5324 bnxt_report_link(bp);
5325 }
5326 bp->link_info.force_link_chng = false;
5327 mutex_unlock(&bp->hwrm_cmd_lock);
5328 return rc;
5329}
5330
Michael Chan939f7f02016-04-05 14:08:58 -04005331static void bnxt_hwrm_set_eee(struct bnxt *bp,
5332 struct hwrm_port_phy_cfg_input *req)
5333{
5334 struct ethtool_eee *eee = &bp->eee;
5335
5336 if (eee->eee_enabled) {
5337 u16 eee_speeds;
5338 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5339
5340 if (eee->tx_lpi_enabled)
5341 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5342 else
5343 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5344
5345 req->flags |= cpu_to_le32(flags);
5346 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5347 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5348 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5349 } else {
5350 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5351 }
5352}
5353
5354int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04005355{
5356 struct hwrm_port_phy_cfg_input req = {0};
5357
5358 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5359 if (set_pause)
5360 bnxt_hwrm_set_pause_common(bp, &req);
5361
5362 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04005363
5364 if (set_eee)
5365 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04005366 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5367}
5368
Michael Chan33f7d552016-04-11 04:11:12 -04005369static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5370{
5371 struct hwrm_port_phy_cfg_input req = {0};
5372
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005373 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04005374 return 0;
5375
5376 if (pci_num_vf(bp->pdev))
5377 return 0;
5378
5379 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05005380 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04005381 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5382}
5383
Michael Chan939f7f02016-04-05 14:08:58 -04005384static bool bnxt_eee_config_ok(struct bnxt *bp)
5385{
5386 struct ethtool_eee *eee = &bp->eee;
5387 struct bnxt_link_info *link_info = &bp->link_info;
5388
5389 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
5390 return true;
5391
5392 if (eee->eee_enabled) {
5393 u32 advertising =
5394 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
5395
5396 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5397 eee->eee_enabled = 0;
5398 return false;
5399 }
5400 if (eee->advertised & ~advertising) {
5401 eee->advertised = advertising & eee->supported;
5402 return false;
5403 }
5404 }
5405 return true;
5406}
5407
Michael Chanc0c050c2015-10-22 16:01:17 -04005408static int bnxt_update_phy_setting(struct bnxt *bp)
5409{
5410 int rc;
5411 bool update_link = false;
5412 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04005413 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04005414 struct bnxt_link_info *link_info = &bp->link_info;
5415
5416 rc = bnxt_update_link(bp, true);
5417 if (rc) {
5418 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
5419 rc);
5420 return rc;
5421 }
5422 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04005423 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
5424 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04005425 update_pause = true;
5426 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
5427 link_info->force_pause_setting != link_info->req_flow_ctrl)
5428 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005429 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5430 if (BNXT_AUTO_MODE(link_info->auto_mode))
5431 update_link = true;
5432 if (link_info->req_link_speed != link_info->force_link_speed)
5433 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05005434 if (link_info->req_duplex != link_info->duplex_setting)
5435 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005436 } else {
5437 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
5438 update_link = true;
5439 if (link_info->advertising != link_info->auto_link_speeds)
5440 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005441 }
5442
Michael Chan16d663a2016-11-16 21:13:07 -05005443 /* The last close may have shut down the link, so we need to call
5444 * PHY_CFG to bring it back up.
5445 */
5446 if (!netif_carrier_ok(bp->dev))
5447 update_link = true;
5448
Michael Chan939f7f02016-04-05 14:08:58 -04005449 if (!bnxt_eee_config_ok(bp))
5450 update_eee = true;
5451
Michael Chanc0c050c2015-10-22 16:01:17 -04005452 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04005453 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04005454 else if (update_pause)
5455 rc = bnxt_hwrm_set_pause(bp);
5456 if (rc) {
5457 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
5458 rc);
5459 return rc;
5460 }
5461
5462 return rc;
5463}
5464
Jeffrey Huang11809492015-11-05 16:25:49 -05005465/* Common routine to pre-map certain register blocks into different GRC windows.
5466 * A PF has 16 4K windows and a VF has 4 4K windows; however, only 15 windows
5467 * on a PF and 3 on a VF can be customized to map in different
5468 * register blocks.
5469 */
5470static void bnxt_preset_reg_win(struct bnxt *bp)
5471{
5472 if (BNXT_PF(bp)) {
5473 /* CAG registers map to GRC window #4 */
5474 writel(BNXT_CAG_REG_BASE,
5475 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
5476 }
5477}
5478
Michael Chanc0c050c2015-10-22 16:01:17 -04005479static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5480{
5481 int rc = 0;
5482
Jeffrey Huang11809492015-11-05 16:25:49 -05005483 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005484 netif_carrier_off(bp->dev);
5485 if (irq_re_init) {
5486 rc = bnxt_setup_int_mode(bp);
5487 if (rc) {
5488 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
5489 rc);
5490 return rc;
5491 }
5492 }
5493 if ((bp->flags & BNXT_FLAG_RFS) &&
5494 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
5495 /* disable RFS if falling back to INTA */
5496 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
5497 bp->flags &= ~BNXT_FLAG_RFS;
5498 }
5499
5500 rc = bnxt_alloc_mem(bp, irq_re_init);
5501 if (rc) {
5502 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
5503 goto open_err_free_mem;
5504 }
5505
5506 if (irq_re_init) {
5507 bnxt_init_napi(bp);
5508 rc = bnxt_request_irq(bp);
5509 if (rc) {
5510 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
5511 goto open_err;
5512 }
5513 }
5514
5515 bnxt_enable_napi(bp);
5516
5517 rc = bnxt_init_nic(bp, irq_re_init);
5518 if (rc) {
5519 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
5520 goto open_err;
5521 }
5522
5523 if (link_re_init) {
5524 rc = bnxt_update_phy_setting(bp);
5525 if (rc)
Michael Chanba41d462016-02-19 19:43:21 -05005526 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04005527 }
5528
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07005529 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07005530 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04005531
Michael Chancaefe522015-12-09 19:35:42 -05005532 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005533 bnxt_enable_int(bp);
5534 /* Enable TX queues */
5535 bnxt_tx_enable(bp);
5536 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04005537 /* Poll link status and check SFP+ module status */
5538 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005539
5540 return 0;
5541
5542open_err:
5543 bnxt_disable_napi(bp);
5544 bnxt_del_napi(bp);
5545
5546open_err_free_mem:
5547 bnxt_free_skbs(bp);
5548 bnxt_free_irq(bp);
5549 bnxt_free_mem(bp, true);
5550 return rc;
5551}
5552
5553/* rtnl_lock held */
5554int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5555{
5556 int rc = 0;
5557
5558 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
5559 if (rc) {
5560 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
5561 dev_close(bp->dev);
5562 }
5563 return rc;
5564}
5565
5566static int bnxt_open(struct net_device *dev)
5567{
5568 struct bnxt *bp = netdev_priv(dev);
5569 int rc = 0;
5570
Michael Chan2a5bedf2016-07-01 18:46:21 -04005571 if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
5572 rc = bnxt_hwrm_func_reset(bp);
5573 if (rc) {
5574 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
5575 rc);
5576 rc = -EBUSY;
5577 return rc;
5578 }
5579 /* Do func_reset during the 1st PF open only to prevent killing
5580 * the VFs when the PF is brought down and up.
5581 */
5582 if (BNXT_PF(bp))
5583 set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005584 }
5585 return __bnxt_open_nic(bp, true, true);
5586}
5587
5588static void bnxt_disable_int_sync(struct bnxt *bp)
5589{
5590 int i;
5591
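 /* Raising intr_sem makes periodic paths such as bnxt_timer() back off;
 * synchronize_irq() then waits out any in-flight handlers.
 */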
5592 atomic_inc(&bp->intr_sem);
5593 if (!netif_running(bp->dev))
5594 return;
5595
5596 bnxt_disable_int(bp);
5597 for (i = 0; i < bp->cp_nr_rings; i++)
5598 synchronize_irq(bp->irq_tbl[i].vector);
5599}
5600
5601int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5602{
5603 int rc = 0;
5604
5605#ifdef CONFIG_BNXT_SRIOV
5606 if (bp->sriov_cfg) {
5607 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
5608 !bp->sriov_cfg,
5609 BNXT_SRIOV_CFG_WAIT_TMO);
5610 if (rc)
5611 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
5612 }
5613#endif
5614 /* Change device state to avoid TX queue wake-ups */
5615 bnxt_tx_disable(bp);
5616
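 /* Clear OPEN before waiting: bnxt_sp_task() sets IN_SP_TASK and then
 * re-checks OPEN, so one side always sees the other.
 */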
Michael Chancaefe522015-12-09 19:35:42 -05005617 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05005618 smp_mb__after_atomic();
5619 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
5620 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04005621
5622 /* Flush rings before disabling interrupts */
5623 bnxt_shutdown_nic(bp, irq_re_init);
5624
5625 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
5626
5627 bnxt_disable_napi(bp);
5628 bnxt_disable_int_sync(bp);
5629 del_timer_sync(&bp->timer);
5630 bnxt_free_skbs(bp);
5631
5632 if (irq_re_init) {
5633 bnxt_free_irq(bp);
5634 bnxt_del_napi(bp);
5635 }
5636 bnxt_free_mem(bp, irq_re_init);
5637 return rc;
5638}
5639
5640static int bnxt_close(struct net_device *dev)
5641{
5642 struct bnxt *bp = netdev_priv(dev);
5643
5644 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04005645 bnxt_hwrm_shutdown_link(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005646 return 0;
5647}
5648
5649/* rtnl_lock held */
5650static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5651{
5652 switch (cmd) {
5653 case SIOCGMIIPHY:
5654 /* fallthru */
5655 case SIOCGMIIREG: {
5656 if (!netif_running(dev))
5657 return -EAGAIN;
5658
5659 return 0;
5660 }
5661
5662 case SIOCSMIIREG:
5663 if (!netif_running(dev))
5664 return -EAGAIN;
5665
5666 return 0;
5667
5668 default:
5669 /* do nothing */
5670 break;
5671 }
5672 return -EOPNOTSUPP;
5673}
5674
5675static struct rtnl_link_stats64 *
5676bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5677{
5678 u32 i;
5679 struct bnxt *bp = netdev_priv(dev);
5680
5681 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5682
5683 if (!bp->bnapi)
5684 return stats;
5685
5686 /* TODO check if we need to synchronize with bnxt_close path */
5687 for (i = 0; i < bp->cp_nr_rings; i++) {
5688 struct bnxt_napi *bnapi = bp->bnapi[i];
5689 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5690 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5691
5692 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5693 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5694 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5695
5696 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5697 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5698 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5699
5700 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5701 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5702 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5703
5704 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5705 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5706 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5707
5708 stats->rx_missed_errors +=
5709 le64_to_cpu(hw_stats->rx_discard_pkts);
5710
5711 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5712
Michael Chanc0c050c2015-10-22 16:01:17 -04005713 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5714 }
5715
Michael Chan9947f832016-03-07 15:38:46 -05005716 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5717 struct rx_port_stats *rx = bp->hw_rx_port_stats;
5718 struct tx_port_stats *tx = bp->hw_tx_port_stats;
5719
5720 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5721 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5722 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5723 le64_to_cpu(rx->rx_ovrsz_frames) +
5724 le64_to_cpu(rx->rx_runt_frames);
5725 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5726 le64_to_cpu(rx->rx_jbr_frames);
5727 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5728 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5729 stats->tx_errors = le64_to_cpu(tx->tx_err);
5730 }
5731
Michael Chanc0c050c2015-10-22 16:01:17 -04005732 return stats;
5733}
5734
5735static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5736{
5737 struct net_device *dev = bp->dev;
5738 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5739 struct netdev_hw_addr *ha;
5740 u8 *haddr;
5741 int mc_count = 0;
5742 bool update = false;
5743 int off = 0;
5744
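 /* Copy the multicast list into the VNIC buffer; on overflow past
 * BNXT_MAX_MC_ADDRS, fall back to the all-multicast filter.
 */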
5745 netdev_for_each_mc_addr(ha, dev) {
5746 if (mc_count >= BNXT_MAX_MC_ADDRS) {
5747 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5748 vnic->mc_list_count = 0;
5749 return false;
5750 }
5751 haddr = ha->addr;
5752 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5753 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5754 update = true;
5755 }
5756 off += ETH_ALEN;
5757 mc_count++;
5758 }
5759 if (mc_count)
5760 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5761
5762 if (mc_count != vnic->mc_list_count) {
5763 vnic->mc_list_count = mc_count;
5764 update = true;
5765 }
5766 return update;
5767}
5768
5769static bool bnxt_uc_list_updated(struct bnxt *bp)
5770{
5771 struct net_device *dev = bp->dev;
5772 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5773 struct netdev_hw_addr *ha;
5774 int off = 0;
5775
5776 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5777 return true;
5778
5779 netdev_for_each_uc_addr(ha, dev) {
5780 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5781 return true;
5782
5783 off += ETH_ALEN;
5784 }
5785 return false;
5786}
5787
5788static void bnxt_set_rx_mode(struct net_device *dev)
5789{
5790 struct bnxt *bp = netdev_priv(dev);
5791 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5792 u32 mask = vnic->rx_mask;
5793 bool mc_update = false;
5794 bool uc_update;
5795
5796 if (!netif_running(dev))
5797 return;
5798
5799 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5800 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5801 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5802
Michael Chan17c71ac2016-07-01 18:46:27 -04005803 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04005804 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5805
5806 uc_update = bnxt_uc_list_updated(bp);
5807
5808 if (dev->flags & IFF_ALLMULTI) {
5809 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5810 vnic->mc_list_count = 0;
5811 } else {
5812 mc_update = bnxt_mc_list_updated(bp, &mask);
5813 }
5814
5815 if (mask != vnic->rx_mask || uc_update || mc_update) {
5816 vnic->rx_mask = mask;
5817
5818 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5819 schedule_work(&bp->sp_task);
5820 }
5821}
5822
Michael Chanb664f002015-12-02 01:54:08 -05005823static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005824{
5825 struct net_device *dev = bp->dev;
5826 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5827 struct netdev_hw_addr *ha;
5828 int i, off = 0, rc;
5829 bool uc_update;
5830
5831 netif_addr_lock_bh(dev);
5832 uc_update = bnxt_uc_list_updated(bp);
5833 netif_addr_unlock_bh(dev);
5834
5835 if (!uc_update)
5836 goto skip_uc;
5837
5838 mutex_lock(&bp->hwrm_cmd_lock);
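 /* Filter 0 holds the primary MAC; free only the extra unicast
 * filters before rebuilding the list below.
 */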
5839 for (i = 1; i < vnic->uc_filter_count; i++) {
5840 struct hwrm_cfa_l2_filter_free_input req = {0};
5841
5842 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5843 -1);
5844
5845 req.l2_filter_id = vnic->fw_l2_filter_id[i];
5846
5847 rc = _hwrm_send_message(bp, &req, sizeof(req),
5848 HWRM_CMD_TIMEOUT);
5849 }
5850 mutex_unlock(&bp->hwrm_cmd_lock);
5851
5852 vnic->uc_filter_count = 1;
5853
5854 netif_addr_lock_bh(dev);
5855 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5856 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5857 } else {
5858 netdev_for_each_uc_addr(ha, dev) {
5859 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5860 off += ETH_ALEN;
5861 vnic->uc_filter_count++;
5862 }
5863 }
5864 netif_addr_unlock_bh(dev);
5865
5866 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5867 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5868 if (rc) {
5869 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5870 rc);
5871 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05005872 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005873 }
5874 }
5875
5876skip_uc:
5877 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5878 if (rc)
5879 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
5880 rc);
Michael Chanb664f002015-12-02 01:54:08 -05005881
5882 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005883}
5884
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005885static bool bnxt_rfs_capable(struct bnxt *bp)
5886{
5887#ifdef CONFIG_RFS_ACCEL
5888 struct bnxt_pf_info *pf = &bp->pf;
5889 int vnics;
5890
5891 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5892 return false;
5893
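 /* Flow steering needs one VNIC (and RSS context) per RX ring,
 * plus the default VNIC.
 */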
5894 vnics = 1 + bp->rx_nr_rings;
Vasundhara Volama2304902016-07-25 12:33:36 -04005895 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
5896 netdev_warn(bp->dev,
5897 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
5898 min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005899 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04005900 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005901
5902 return true;
5903#else
5904 return false;
5905#endif
5906}
5907
Michael Chanc0c050c2015-10-22 16:01:17 -04005908static netdev_features_t bnxt_fix_features(struct net_device *dev,
5909 netdev_features_t features)
5910{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005911 struct bnxt *bp = netdev_priv(dev);
5912
Vasundhara Volama2304902016-07-25 12:33:36 -04005913 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005914 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04005915
5916 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
5917 * turned on or off together.
5918 */
5919 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
5920 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
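 /* Exactly one of the two bits changed: use the current CTAG state
 * to pick a direction and toggle both together.
 */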
5921 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
5922 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
5923 NETIF_F_HW_VLAN_STAG_RX);
5924 else
5925 features |= NETIF_F_HW_VLAN_CTAG_RX |
5926 NETIF_F_HW_VLAN_STAG_RX;
5927 }
Michael Chancf6645f2016-06-13 02:25:28 -04005928#ifdef CONFIG_BNXT_SRIOV
5929 if (BNXT_VF(bp)) {
5930 if (bp->vf.vlan) {
5931 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
5932 NETIF_F_HW_VLAN_STAG_RX);
5933 }
5934 }
5935#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04005936 return features;
5937}
5938
5939static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
5940{
5941 struct bnxt *bp = netdev_priv(dev);
5942 u32 flags = bp->flags;
5943 u32 changes;
5944 int rc = 0;
5945 bool re_init = false;
5946 bool update_tpa = false;
5947
5948 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04005949 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04005950 flags |= BNXT_FLAG_GRO;
5951 if (features & NETIF_F_LRO)
5952 flags |= BNXT_FLAG_LRO;
5953
5954 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5955 flags |= BNXT_FLAG_STRIP_VLAN;
5956
5957 if (features & NETIF_F_NTUPLE)
5958 flags |= BNXT_FLAG_RFS;
5959
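 /* XOR isolates the changed feature bits; TPA turning fully on or off
 * needs a ring re-init, and so does any non-TPA change.
 */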
5960 changes = flags ^ bp->flags;
5961 if (changes & BNXT_FLAG_TPA) {
5962 update_tpa = true;
5963 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
5964 (flags & BNXT_FLAG_TPA) == 0)
5965 re_init = true;
5966 }
5967
5968 if (changes & ~BNXT_FLAG_TPA)
5969 re_init = true;
5970
5971 if (flags != bp->flags) {
5972 u32 old_flags = bp->flags;
5973
5974 bp->flags = flags;
5975
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005976 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005977 if (update_tpa)
5978 bnxt_set_ring_params(bp);
5979 return rc;
5980 }
5981
5982 if (re_init) {
5983 bnxt_close_nic(bp, false, false);
5984 if (update_tpa)
5985 bnxt_set_ring_params(bp);
5986
5987 return bnxt_open_nic(bp, false, false);
5988 }
5989 if (update_tpa) {
5990 rc = bnxt_set_tpa(bp,
5991 (flags & BNXT_FLAG_TPA) ?
5992 true : false);
5993 if (rc)
5994 bp->flags = old_flags;
5995 }
5996 }
5997 return rc;
5998}
5999
Michael Chan9f554592016-01-02 23:44:58 -05006000static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6001{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006002 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006003 int i = bnapi->index;
6004
Michael Chan3b2b7d92016-01-02 23:45:00 -05006005 if (!txr)
6006 return;
6007
Michael Chan9f554592016-01-02 23:44:58 -05006008 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6009 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6010 txr->tx_cons);
6011}
6012
6013static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6014{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006015 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006016 int i = bnapi->index;
6017
Michael Chan3b2b7d92016-01-02 23:45:00 -05006018 if (!rxr)
6019 return;
6020
Michael Chan9f554592016-01-02 23:44:58 -05006021 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6022 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6023 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6024 rxr->rx_sw_agg_prod);
6025}
6026
6027static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6028{
6029 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6030 int i = bnapi->index;
6031
6032 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6033 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6034}
6035
Michael Chanc0c050c2015-10-22 16:01:17 -04006036static void bnxt_dbg_dump_states(struct bnxt *bp)
6037{
6038 int i;
6039 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04006040
6041 for (i = 0; i < bp->cp_nr_rings; i++) {
6042 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04006043 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05006044 bnxt_dump_tx_sw_state(bnapi);
6045 bnxt_dump_rx_sw_state(bnapi);
6046 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04006047 }
6048 }
6049}
6050
Michael Chan6988bd92016-06-13 02:25:29 -04006051static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04006052{
Michael Chan6988bd92016-06-13 02:25:29 -04006053 if (!silent)
6054 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05006055 if (netif_running(bp->dev)) {
6056 bnxt_close_nic(bp, false, false);
6057 bnxt_open_nic(bp, false, false);
6058 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006059}
6060
6061static void bnxt_tx_timeout(struct net_device *dev)
6062{
6063 struct bnxt *bp = netdev_priv(dev);
6064
6065 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6066 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6067 schedule_work(&bp->sp_task);
6068}
6069
6070#ifdef CONFIG_NET_POLL_CONTROLLER
6071static void bnxt_poll_controller(struct net_device *dev)
6072{
6073 struct bnxt *bp = netdev_priv(dev);
6074 int i;
6075
6076 for (i = 0; i < bp->cp_nr_rings; i++) {
6077 struct bnxt_irq *irq = &bp->irq_tbl[i];
6078
6079 disable_irq(irq->vector);
6080 irq->handler(irq->vector, bp->bnapi[i]);
6081 enable_irq(irq->vector);
6082 }
6083}
6084#endif
6085
6086static void bnxt_timer(unsigned long data)
6087{
6088 struct bnxt *bp = (struct bnxt *)data;
6089 struct net_device *dev = bp->dev;
6090
6091 if (!netif_running(dev))
6092 return;
6093
6094 if (atomic_read(&bp->intr_sem) != 0)
6095 goto bnxt_restart_timer;
6096
Michael Chan3bdf56c2016-03-07 15:38:45 -05006097 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6098 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6099 schedule_work(&bp->sp_task);
6100 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006101bnxt_restart_timer:
6102 mod_timer(&bp->timer, jiffies + bp->current_interval);
6103}
6104
Michael Chan6988bd92016-06-13 02:25:29 -04006105/* Only called from bnxt_sp_task() */
6106static void bnxt_reset(struct bnxt *bp, bool silent)
6107{
6108 /* bnxt_reset_task() calls bnxt_close_nic() which waits
6109 * for BNXT_STATE_IN_SP_TASK to clear.
6110 * If there is a parallel dev_close(), bnxt_close() may be holding
6111 * rtnl_lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6112 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock.
6113 */
6114 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6115 rtnl_lock();
6116 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6117 bnxt_reset_task(bp, silent);
6118 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6119 rtnl_unlock();
6120}
6121
Michael Chanc0c050c2015-10-22 16:01:17 -04006122static void bnxt_cfg_ntp_filters(struct bnxt *);
6123
6124static void bnxt_sp_task(struct work_struct *work)
6125{
6126 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6127 int rc;
6128
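 /* Set IN_SP_TASK before testing OPEN; bnxt_close_nic() clears OPEN
 * first and then waits on IN_SP_TASK, so the two paths can't miss
 * each other.
 */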
Michael Chan4cebdce2015-12-09 19:35:43 -05006129 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6130 smp_mb__after_atomic();
6131 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6132 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006133 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05006134 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006135
6136 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6137 bnxt_cfg_rx_mode(bp);
6138
6139 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6140 bnxt_cfg_ntp_filters(bp);
6141 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
Michael Chan286ef9d2016-11-16 21:13:08 -05006142 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6143 &bp->sp_event))
6144 bnxt_hwrm_phy_qcaps(bp);
6145
Michael Chanc0c050c2015-10-22 16:01:17 -04006146 rc = bnxt_update_link(bp, true);
6147 if (rc)
6148 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6149 rc);
6150 }
6151 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6152 bnxt_hwrm_exec_fwd_req(bp);
6153 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6154 bnxt_hwrm_tunnel_dst_port_alloc(
6155 bp, bp->vxlan_port,
6156 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6157 }
6158 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6159 bnxt_hwrm_tunnel_dst_port_free(
6160 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6161 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006162 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6163 bnxt_hwrm_tunnel_dst_port_alloc(
6164 bp, bp->nge_port,
6165 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6166 }
6167 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6168 bnxt_hwrm_tunnel_dst_port_free(
6169 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6170 }
Michael Chan6988bd92016-06-13 02:25:29 -04006171 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6172 bnxt_reset(bp, false);
Michael Chan4cebdce2015-12-09 19:35:43 -05006173
Michael Chanfc0f1922016-06-13 02:25:30 -04006174 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6175 bnxt_reset(bp, true);
6176
Michael Chan4bb13ab2016-04-05 14:09:01 -04006177 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
Michael Chan10289be2016-05-15 03:04:49 -04006178 bnxt_get_port_module_status(bp);
Michael Chan4bb13ab2016-04-05 14:09:01 -04006179
Michael Chan3bdf56c2016-03-07 15:38:45 -05006180 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6181 bnxt_hwrm_port_qstats(bp);
6182
Michael Chan4cebdce2015-12-09 19:35:43 -05006183 smp_mb__before_atomic();
6184 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006185}
6186
6187static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6188{
6189 int rc;
6190 struct bnxt *bp = netdev_priv(dev);
6191
6192 SET_NETDEV_DEV(dev, &pdev->dev);
6193
6194 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6195 rc = pci_enable_device(pdev);
6196 if (rc) {
6197 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6198 goto init_err;
6199 }
6200
6201 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6202 dev_err(&pdev->dev,
6203 "Cannot find PCI device base address, aborting\n");
6204 rc = -ENODEV;
6205 goto init_err_disable;
6206 }
6207
6208 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6209 if (rc) {
6210 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
6211 goto init_err_disable;
6212 }
6213
6214 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
6215 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6216 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
6217 goto init_err_disable;
6218 }
6219
6220 pci_set_master(pdev);
6221
6222 bp->dev = dev;
6223 bp->pdev = pdev;
6224
6225 bp->bar0 = pci_ioremap_bar(pdev, 0);
6226 if (!bp->bar0) {
6227 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
6228 rc = -ENOMEM;
6229 goto init_err_release;
6230 }
6231
6232 bp->bar1 = pci_ioremap_bar(pdev, 2);
6233 if (!bp->bar1) {
6234 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
6235 rc = -ENOMEM;
6236 goto init_err_release;
6237 }
6238
6239 bp->bar2 = pci_ioremap_bar(pdev, 4);
6240 if (!bp->bar2) {
6241 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
6242 rc = -ENOMEM;
6243 goto init_err_release;
6244 }
6245
Satish Baddipadige6316ea62016-03-07 15:38:48 -05006246 pci_enable_pcie_error_reporting(pdev);
6247
Michael Chanc0c050c2015-10-22 16:01:17 -04006248 INIT_WORK(&bp->sp_task, bnxt_sp_task);
6249
6250 spin_lock_init(&bp->ntp_fltr_lock);
6251
6252 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
6253 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
6254
Michael Chandfb5b892016-02-26 04:00:01 -05006255 /* tick values in micro seconds */
Michael Chandfc9c942016-02-26 04:00:03 -05006256 bp->rx_coal_ticks = 12;
6257 bp->rx_coal_bufs = 30;
Michael Chandfb5b892016-02-26 04:00:01 -05006258 bp->rx_coal_ticks_irq = 1;
6259 bp->rx_coal_bufs_irq = 2;
Michael Chanc0c050c2015-10-22 16:01:17 -04006260
Michael Chandfc9c942016-02-26 04:00:03 -05006261 bp->tx_coal_ticks = 25;
6262 bp->tx_coal_bufs = 30;
6263 bp->tx_coal_ticks_irq = 2;
6264 bp->tx_coal_bufs_irq = 2;
6265
Michael Chan51f30782016-07-01 18:46:29 -04006266 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
6267
Michael Chanc0c050c2015-10-22 16:01:17 -04006268 init_timer(&bp->timer);
6269 bp->timer.data = (unsigned long)bp;
6270 bp->timer.function = bnxt_timer;
6271 bp->current_interval = BNXT_TIMER_INTERVAL;
6272
Michael Chancaefe522015-12-09 19:35:42 -05006273 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006274
6275 return 0;
6276
6277init_err_release:
6278 if (bp->bar2) {
6279 pci_iounmap(pdev, bp->bar2);
6280 bp->bar2 = NULL;
6281 }
6282
6283 if (bp->bar1) {
6284 pci_iounmap(pdev, bp->bar1);
6285 bp->bar1 = NULL;
6286 }
6287
6288 if (bp->bar0) {
6289 pci_iounmap(pdev, bp->bar0);
6290 bp->bar0 = NULL;
6291 }
6292
6293 pci_release_regions(pdev);
6294
6295init_err_disable:
6296 pci_disable_device(pdev);
6297
6298init_err:
6299 return rc;
6300}
6301
6302/* rtnl_lock held */
6303static int bnxt_change_mac_addr(struct net_device *dev, void *p)
6304{
6305 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006306 struct bnxt *bp = netdev_priv(dev);
6307 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006308
6309 if (!is_valid_ether_addr(addr->sa_data))
6310 return -EADDRNOTAVAIL;
6311
Michael Chan84c33dd2016-04-11 04:11:13 -04006312 rc = bnxt_approve_mac(bp, addr->sa_data);
6313 if (rc)
6314 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006315
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006316 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
6317 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006318
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006319 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6320 if (netif_running(dev)) {
6321 bnxt_close_nic(bp, false, false);
6322 rc = bnxt_open_nic(bp, false, false);
6323 }
6324
6325 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006326}
6327
6328/* rtnl_lock held */
6329static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
6330{
6331 struct bnxt *bp = netdev_priv(dev);
6332
Michael Chanc0c050c2015-10-22 16:01:17 -04006333 if (netif_running(dev))
6334 bnxt_close_nic(bp, false, false);
6335
6336 dev->mtu = new_mtu;
6337 bnxt_set_ring_params(bp);
6338
6339 if (netif_running(dev))
6340 return bnxt_open_nic(bp, false, false);
6341
6342 return 0;
6343}
6344
Michael Chanc5e3deb2016-12-02 21:17:15 -05006345int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
Michael Chanc0c050c2015-10-22 16:01:17 -04006346{
6347 struct bnxt *bp = netdev_priv(dev);
Michael Chan3ffb6a32016-11-11 00:11:42 -05006348 bool sh = false;
John Fastabend16e5cc62016-02-16 21:16:43 -08006349
Michael Chanc0c050c2015-10-22 16:01:17 -04006350 if (tc > bp->max_tc) {
6351 netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
6352 tc, bp->max_tc);
6353 return -EINVAL;
6354 }
6355
6356 if (netdev_get_num_tc(dev) == tc)
6357 return 0;
6358
Michael Chan3ffb6a32016-11-11 00:11:42 -05006359 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6360 sh = true;
6361
Michael Chanc0c050c2015-10-22 16:01:17 -04006362 if (tc) {
Michael Chan6e6c5a52016-01-02 23:45:02 -05006363 int max_rx_rings, max_tx_rings, rc;
Michael Chan01657bc2016-01-02 23:45:03 -05006364
6365 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
Michael Chan6e6c5a52016-01-02 23:45:02 -05006366 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
Michael Chanc0c050c2015-10-22 16:01:17 -04006367 return -ENOMEM;
6368 }
6369
6370 /* Needs to close the device and do hw resource re-allocations */
6371 if (netif_running(bp->dev))
6372 bnxt_close_nic(bp, true, false);
6373
6374 if (tc) {
6375 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
6376 netdev_set_num_tc(dev, tc);
6377 } else {
6378 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6379 netdev_reset_tc(dev);
6380 }
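 /* Shared mode: one completion ring covers an RX/TX pair, so take
 * max(tx, rx); dedicated mode needs one per ring of each type.
 */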
Michael Chan3ffb6a32016-11-11 00:11:42 -05006381 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6382 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04006383 bp->num_stat_ctxs = bp->cp_nr_rings;
6384
6385 if (netif_running(bp->dev))
6386 return bnxt_open_nic(bp, true, false);
6387
6388 return 0;
6389}
6390
Michael Chanc5e3deb2016-12-02 21:17:15 -05006391static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6392 struct tc_to_netdev *ntc)
6393{
6394 if (ntc->type != TC_SETUP_MQPRIO)
6395 return -EINVAL;
6396
6397 return bnxt_setup_mq_tc(dev, ntc->tc);
6398}
6399
Michael Chanc0c050c2015-10-22 16:01:17 -04006400#ifdef CONFIG_RFS_ACCEL
6401static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
6402 struct bnxt_ntuple_filter *f2)
6403{
6404 struct flow_keys *keys1 = &f1->fkeys;
6405 struct flow_keys *keys2 = &f2->fkeys;
6406
6407 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
6408 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
6409 keys1->ports.ports == keys2->ports.ports &&
6410 keys1->basic.ip_proto == keys2->basic.ip_proto &&
6411 keys1->basic.n_proto == keys2->basic.n_proto &&
Michael Chana54c4d72016-07-25 12:33:35 -04006412 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
6413 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
Michael Chanc0c050c2015-10-22 16:01:17 -04006414 return true;
6415
6416 return false;
6417}
6418
6419static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
6420 u16 rxq_index, u32 flow_id)
6421{
6422 struct bnxt *bp = netdev_priv(dev);
6423 struct bnxt_ntuple_filter *fltr, *new_fltr;
6424 struct flow_keys *fkeys;
6425 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chana54c4d72016-07-25 12:33:35 -04006426 int rc = 0, idx, bit_id, l2_idx = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006427 struct hlist_head *head;
6428
6429 if (skb->encapsulation)
6430 return -EPROTONOSUPPORT;
6431
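 /* A destination MAC other than the primary address must match one of
 * the extra unicast filters; remember its index so the ntuple filter
 * references the right L2 filter.
 */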
Michael Chana54c4d72016-07-25 12:33:35 -04006432 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
6433 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6434 int off = 0, j;
6435
6436 netif_addr_lock_bh(dev);
6437 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
6438 if (ether_addr_equal(eth->h_dest,
6439 vnic->uc_list + off)) {
6440 l2_idx = j + 1;
6441 break;
6442 }
6443 }
6444 netif_addr_unlock_bh(dev);
6445 if (!l2_idx)
6446 return -EINVAL;
6447 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006448 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
6449 if (!new_fltr)
6450 return -ENOMEM;
6451
6452 fkeys = &new_fltr->fkeys;
6453 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
6454 rc = -EPROTONOSUPPORT;
6455 goto err_free;
6456 }
6457
6458 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
6459 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
6460 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
6461 rc = -EPROTONOSUPPORT;
6462 goto err_free;
6463 }
6464
Michael Chana54c4d72016-07-25 12:33:35 -04006465 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04006466 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
6467
6468 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
6469 head = &bp->ntp_fltr_hash_tbl[idx];
6470 rcu_read_lock();
6471 hlist_for_each_entry_rcu(fltr, head, hash) {
6472 if (bnxt_fltr_match(fltr, new_fltr)) {
6473 rcu_read_unlock();
6474 rc = 0;
6475 goto err_free;
6476 }
6477 }
6478 rcu_read_unlock();
6479
6480 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05006481 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6482 BNXT_NTP_FLTR_MAX_FLTR, 0);
6483 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006484 spin_unlock_bh(&bp->ntp_fltr_lock);
6485 rc = -ENOMEM;
6486 goto err_free;
6487 }
6488
Michael Chan84e86b92015-11-05 16:25:50 -05006489 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04006490 new_fltr->flow_id = flow_id;
Michael Chana54c4d72016-07-25 12:33:35 -04006491 new_fltr->l2_fltr_idx = l2_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04006492 new_fltr->rxq = rxq_index;
6493 hlist_add_head_rcu(&new_fltr->hash, head);
6494 bp->ntp_fltr_count++;
6495 spin_unlock_bh(&bp->ntp_fltr_lock);
6496
6497 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
6498 schedule_work(&bp->sp_task);
6499
6500 return new_fltr->sw_id;
6501
6502err_free:
6503 kfree(new_fltr);
6504 return rc;
6505}
6506
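/* aRFS housekeeping for the slow-path work item.  Filters already
 * programmed (BNXT_FLTR_VALID) are freed once the core says the flow may
 * expire (assumed rps_may_expire_flow() semantics: true when the flow is
 * no longer being steered to that queue).  Filters queued by
 * bnxt_rx_flow_steer() but not yet programmed are allocated here; on
 * HWRM failure they are unhashed and their sw_id bit is returned to
 * ntp_fltr_bmap.
 */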
6507static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6508{
6509 int i;
6510
6511 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
6512 struct hlist_head *head;
6513 struct hlist_node *tmp;
6514 struct bnxt_ntuple_filter *fltr;
6515 int rc;
6516
6517 head = &bp->ntp_fltr_hash_tbl[i];
6518 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
6519 bool del = false;
6520
6521 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
6522 if (rps_may_expire_flow(bp->dev, fltr->rxq,
6523 fltr->flow_id,
6524 fltr->sw_id)) {
6525 bnxt_hwrm_cfa_ntuple_filter_free(bp,
6526 fltr);
6527 del = true;
6528 }
6529 } else {
6530 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
6531 fltr);
6532 if (rc)
6533 del = true;
6534 else
6535 set_bit(BNXT_FLTR_VALID, &fltr->state);
6536 }
6537
6538 if (del) {
6539 spin_lock_bh(&bp->ntp_fltr_lock);
6540 hlist_del_rcu(&fltr->hash);
6541 bp->ntp_fltr_count--;
6542 spin_unlock_bh(&bp->ntp_fltr_lock);
6543 synchronize_rcu();
6544 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
6545 kfree(fltr);
6546 }
6547 }
6548 }
Jeffrey Huang19241362016-02-26 04:00:00 -05006549 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
6550 		netdev_info(bp->dev, "Received PF driver unload event!\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04006551}
6552
6553#else
6554
6555static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6556{
6557}
6558
6559#endif /* CONFIG_RFS_ACCEL */
6560
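/* UDP tunnel port notifications.  The hardware (as driven here) tracks a
 * single VXLAN and a single GENEVE destination port, so each type keeps a
 * reference count and only the first add (or last delete) schedules the
 * HWRM port (un)configuration via the slow-path task.  An add for a
 * second, different port of the same type is silently ignored.
 */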
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006561static void bnxt_udp_tunnel_add(struct net_device *dev,
6562 struct udp_tunnel_info *ti)
Michael Chanc0c050c2015-10-22 16:01:17 -04006563{
6564 struct bnxt *bp = netdev_priv(dev);
6565
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006566 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6567 return;
6568
Michael Chanc0c050c2015-10-22 16:01:17 -04006569 if (!netif_running(dev))
6570 return;
6571
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006572 switch (ti->type) {
6573 case UDP_TUNNEL_TYPE_VXLAN:
6574 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
6575 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04006576
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006577 bp->vxlan_port_cnt++;
6578 if (bp->vxlan_port_cnt == 1) {
6579 bp->vxlan_port = ti->port;
6580 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
6582 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006583 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006584 case UDP_TUNNEL_TYPE_GENEVE:
6585 if (bp->nge_port_cnt && bp->nge_port != ti->port)
6586 return;
6587
6588 bp->nge_port_cnt++;
6589 if (bp->nge_port_cnt == 1) {
6590 bp->nge_port = ti->port;
6591 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
6592 }
6593 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006594 default:
6595 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04006596 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006597
6598 schedule_work(&bp->sp_task);
6599}
6600
6601static void bnxt_udp_tunnel_del(struct net_device *dev,
6602 struct udp_tunnel_info *ti)
6603{
6604 struct bnxt *bp = netdev_priv(dev);
6605
6606 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6607 return;
6608
6609 if (!netif_running(dev))
6610 return;
6611
6612 switch (ti->type) {
6613 case UDP_TUNNEL_TYPE_VXLAN:
6614 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
6615 return;
6616 bp->vxlan_port_cnt--;
6617
6618 if (bp->vxlan_port_cnt != 0)
6619 return;
6620
6621 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
6622 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006623 case UDP_TUNNEL_TYPE_GENEVE:
6624 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
6625 return;
6626 bp->nge_port_cnt--;
6627
6628 if (bp->nge_port_cnt != 0)
6629 return;
6630
6631 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
6632 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006633 default:
6634 return;
6635 }
6636
6637 schedule_work(&bp->sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04006638}
6639
6640static const struct net_device_ops bnxt_netdev_ops = {
6641 .ndo_open = bnxt_open,
6642 .ndo_start_xmit = bnxt_start_xmit,
6643 .ndo_stop = bnxt_close,
6644 .ndo_get_stats64 = bnxt_get_stats64,
6645 .ndo_set_rx_mode = bnxt_set_rx_mode,
6646 .ndo_do_ioctl = bnxt_ioctl,
6647 .ndo_validate_addr = eth_validate_addr,
6648 .ndo_set_mac_address = bnxt_change_mac_addr,
6649 .ndo_change_mtu = bnxt_change_mtu,
6650 .ndo_fix_features = bnxt_fix_features,
6651 .ndo_set_features = bnxt_set_features,
6652 .ndo_tx_timeout = bnxt_tx_timeout,
6653#ifdef CONFIG_BNXT_SRIOV
6654 .ndo_get_vf_config = bnxt_get_vf_config,
6655 .ndo_set_vf_mac = bnxt_set_vf_mac,
6656 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
6657 .ndo_set_vf_rate = bnxt_set_vf_bw,
6658 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
6659 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
6660#endif
6661#ifdef CONFIG_NET_POLL_CONTROLLER
6662 .ndo_poll_controller = bnxt_poll_controller,
6663#endif
6664 .ndo_setup_tc = bnxt_setup_tc,
6665#ifdef CONFIG_RFS_ACCEL
6666 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
6667#endif
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006668 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
6669 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
Michael Chanc0c050c2015-10-22 16:01:17 -04006670#ifdef CONFIG_NET_RX_BUSY_POLL
6671 .ndo_busy_poll = bnxt_busy_poll,
6672#endif
6673};
6674
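/* PCI ->remove() callback.  Teardown is ordered so no new work can arrive
 * while resources disappear: disable SR-IOV first, then unregister the
 * netdev, cancel the slow-path work item, and only then release firmware
 * driver registration, HWRM buffers, DCB state, BARs and PCI resources.
 */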
6675static void bnxt_remove_one(struct pci_dev *pdev)
6676{
6677 struct net_device *dev = pci_get_drvdata(pdev);
6678 struct bnxt *bp = netdev_priv(dev);
6679
6680 if (BNXT_PF(bp))
6681 bnxt_sriov_disable(bp);
6682
Satish Baddipadige6316ea62016-03-07 15:38:48 -05006683 pci_disable_pcie_error_reporting(pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04006684 unregister_netdev(dev);
6685 cancel_work_sync(&bp->sp_task);
6686 bp->sp_event = 0;
6687
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05006688 bnxt_hwrm_func_drv_unrgtr(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006689 bnxt_free_hwrm_resources(bp);
Michael Chan7df4ae92016-12-02 21:17:17 -05006690 bnxt_dcb_free(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006691 pci_iounmap(pdev, bp->bar2);
6692 pci_iounmap(pdev, bp->bar1);
6693 pci_iounmap(pdev, bp->bar0);
6694 free_netdev(dev);
6695
6696 pci_release_regions(pdev);
6697 pci_disable_device(pdev);
6698}
6699
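/* One-time PHY setup at probe: query PHY capabilities and the current
 * link state from firmware, then seed the ethtool-visible link settings
 * (autoneg, advertised speeds, flow control) from the NVM defaults so
 * they are coherent before the interface is first opened.
 */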
6700static int bnxt_probe_phy(struct bnxt *bp)
6701{
6702 int rc = 0;
6703 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chanc0c050c2015-10-22 16:01:17 -04006704
Michael Chan170ce012016-04-05 14:08:57 -04006705 rc = bnxt_hwrm_phy_qcaps(bp);
6706 if (rc) {
6707 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
6708 rc);
6709 return rc;
6710 }
6711
Michael Chanc0c050c2015-10-22 16:01:17 -04006712 rc = bnxt_update_link(bp, false);
6713 if (rc) {
6714 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
6715 rc);
6716 return rc;
6717 }
6718
Michael Chan93ed8112016-06-13 02:25:37 -04006719 /* Older firmware does not have supported_auto_speeds, so assume
6720 * that all supported speeds can be autonegotiated.
6721 */
6722 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
6723 link_info->support_auto_speeds = link_info->support_speeds;
6724
Michael Chanc0c050c2015-10-22 16:01:17 -04006725	/* initialize the ethtool settings copy with NVM settings */
Michael Chan0d8abf02016-02-10 17:33:47 -05006726 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
Michael Chanc9ee9512016-04-05 14:08:56 -04006727 link_info->autoneg = BNXT_AUTONEG_SPEED;
6728 if (bp->hwrm_spec_code >= 0x10201) {
6729 if (link_info->auto_pause_setting &
6730 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
6731 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6732 } else {
6733 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6734 }
Michael Chan0d8abf02016-02-10 17:33:47 -05006735 link_info->advertising = link_info->auto_link_speeds;
Michael Chan0d8abf02016-02-10 17:33:47 -05006736 } else {
6737 link_info->req_link_speed = link_info->force_link_speed;
6738 link_info->req_duplex = link_info->duplex_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04006739 }
Michael Chanc9ee9512016-04-05 14:08:56 -04006740 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
6741 link_info->req_flow_ctrl =
6742 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
6743 else
6744 link_info->req_flow_ctrl = link_info->force_pause_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04006745 return rc;
6746}
6747
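/* The MSI-X table-size field in PCI config space is encoded as N-1,
 * hence the +1 below: for example, a raw QSIZE value of 127 means 128
 * vectors.  Without an MSI-X capability, assume a single INTx/MSI
 * interrupt.
 */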
6748static int bnxt_get_max_irq(struct pci_dev *pdev)
6749{
6750 u16 ctrl;
6751
6752 if (!pdev->msix_cap)
6753 return 1;
6754
6755 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
6756 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
6757}
6758
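/* Ring limits come from different firmware-reported maxima for PFs and
 * VFs.  Completion rings are additionally capped by the IRQ and stat
 * context counts, and RX rings by the number of hardware ring groups;
 * with aggregation rings enabled each RX ring consumes a companion
 * aggregation ring, so the RX budget is halved.
 */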
Michael Chan6e6c5a52016-01-02 23:45:02 -05006759static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6760 int *max_cp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006761{
Michael Chan6e6c5a52016-01-02 23:45:02 -05006762 int max_ring_grps = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006763
Michael Chan379a80a2015-10-23 15:06:19 -04006764#ifdef CONFIG_BNXT_SRIOV
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006765 if (!BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006766 *max_tx = bp->vf.max_tx_rings;
6767 *max_rx = bp->vf.max_rx_rings;
Michael Chan6e6c5a52016-01-02 23:45:02 -05006768 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
6769 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
Michael Chanb72d4a62015-12-27 18:19:27 -05006770 max_ring_grps = bp->vf.max_hw_ring_grps;
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006771 } else
Michael Chan379a80a2015-10-23 15:06:19 -04006772#endif
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006773 {
6774 *max_tx = bp->pf.max_tx_rings;
6775 *max_rx = bp->pf.max_rx_rings;
6776 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
6777 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
6778 max_ring_grps = bp->pf.max_hw_ring_grps;
Michael Chanc0c050c2015-10-22 16:01:17 -04006779 }
Prashant Sreedharan76595192016-07-18 07:15:22 -04006780 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
6781 *max_cp -= 1;
6782 *max_rx -= 2;
6783 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006784 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6785 *max_rx >>= 1;
Michael Chanb72d4a62015-12-27 18:19:27 -05006786 *max_rx = min_t(int, *max_rx, max_ring_grps);
Michael Chan6e6c5a52016-01-02 23:45:02 -05006787}
6788
6789int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
6790{
6791 int rx, tx, cp;
6792
6793 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
6794 if (!rx || !tx || !cp)
6795 return -ENOMEM;
6796
6797 *max_rx = rx;
6798 *max_tx = tx;
6799 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
6800}
6801
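/* Default ring provisioning.  A worked example under assumed limits:
 * with netif_get_num_default_rss_queues() == 8 and firmware allowing 16
 * RX and 16 TX rings, this picks rx = tx = 8, and because shared-ring
 * mode is always used here (sh == true) it needs cp = max(rx, tx) = 8
 * completion rings instead of rx + tx = 16, halving the MSI-X demand.
 */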
6802static int bnxt_set_dflt_rings(struct bnxt *bp)
6803{
6804 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6805 bool sh = true;
6806
6807 if (sh)
6808 bp->flags |= BNXT_FLAG_SHARED_RINGS;
6809 dflt_rings = netif_get_num_default_rss_queues();
6810 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6811 if (rc)
6812 return rc;
6813 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6814 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6815 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6816 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6817 bp->tx_nr_rings + bp->rx_nr_rings;
6818 bp->num_stat_ctxs = bp->cp_nr_rings;
Prashant Sreedharan76595192016-07-18 07:15:22 -04006819 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6820 bp->rx_nr_rings++;
6821 bp->cp_nr_rings++;
6822 }
Michael Chan6e6c5a52016-01-02 23:45:02 -05006823 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006824}
6825
Ajit Khaparde90c4f782016-05-15 03:04:45 -04006826static void bnxt_parse_log_pcie_link(struct bnxt *bp)
6827{
6828 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
6829 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
6830
6831 if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
6832 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
6833 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
6834 else
6835 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
6836 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
6837 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
6838 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
6839 "Unknown", width);
6840}
6841
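/* Main PCI probe.  Rough order of operations as implemented below: map
 * BARs and set up DMA (bnxt_init_board()), allocate HWRM request/response
 * buffers, query firmware version and capabilities, advertise netdev
 * features, size the default rings and RSS hash configuration, probe the
 * PHY, and finally register_netdev().  Failures unwind through the
 * init_err/init_err_free labels.
 */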
Michael Chanc0c050c2015-10-22 16:01:17 -04006842static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6843{
6844 static int version_printed;
6845 struct net_device *dev;
6846 struct bnxt *bp;
Michael Chan6e6c5a52016-01-02 23:45:02 -05006847 int rc, max_irqs;
Michael Chanc0c050c2015-10-22 16:01:17 -04006848
Prashant Sreedharanfa853dd2016-07-18 07:15:25 -04006849 if (pdev->device == 0x16cd && pci_is_bridge(pdev))
6850 return -ENODEV;
6851
Michael Chanc0c050c2015-10-22 16:01:17 -04006852 if (version_printed++ == 0)
6853 pr_info("%s", version);
6854
6855 max_irqs = bnxt_get_max_irq(pdev);
6856 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6857 if (!dev)
6858 return -ENOMEM;
6859
6860 bp = netdev_priv(dev);
6861
6862 if (bnxt_vf_pciid(ent->driver_data))
6863 bp->flags |= BNXT_FLAG_VF;
6864
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006865 if (pdev->msix_cap)
Michael Chanc0c050c2015-10-22 16:01:17 -04006866 bp->flags |= BNXT_FLAG_MSIX_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -04006867
6868 rc = bnxt_init_board(pdev, dev);
6869 if (rc < 0)
6870 goto init_err_free;
6871
6872 dev->netdev_ops = &bnxt_netdev_ops;
6873 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6874 dev->ethtool_ops = &bnxt_ethtool_ops;
6875
6876 pci_set_drvdata(pdev, dev);
6877
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006878 rc = bnxt_alloc_hwrm_resources(bp);
6879 if (rc)
6880 goto init_err;
6881
6882 mutex_init(&bp->hwrm_cmd_lock);
6883 rc = bnxt_hwrm_ver_get(bp);
6884 if (rc)
6885 goto init_err;
6886
Rob Swindell5ac67d82016-09-19 03:58:03 -04006887 bnxt_hwrm_fw_set_time(bp);
6888
Michael Chanc0c050c2015-10-22 16:01:17 -04006889 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6890 NETIF_F_TSO | NETIF_F_TSO6 |
6891 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Tom Herbert7e133182016-05-18 09:06:10 -07006892 NETIF_F_GSO_IPXIP4 |
Alexander Duyck152971e2016-05-02 09:38:55 -07006893 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
6894 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006895 NETIF_F_RXCSUM | NETIF_F_GRO;
6896
6897 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6898 dev->hw_features |= NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -04006899
Michael Chanc0c050c2015-10-22 16:01:17 -04006900 dev->hw_enc_features =
6901 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6902 NETIF_F_TSO | NETIF_F_TSO6 |
6903 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Alexander Duyck152971e2016-05-02 09:38:55 -07006904 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07006905 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
Alexander Duyck152971e2016-05-02 09:38:55 -07006906 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
6907 NETIF_F_GSO_GRE_CSUM;
Michael Chanc0c050c2015-10-22 16:01:17 -04006908 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
6909 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6910 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
6911 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
6912 dev->priv_flags |= IFF_UNICAST_FLT;
6913
Jarod Wilsone1c6dcc2016-10-17 15:54:04 -04006914 /* MTU range: 60 - 9500 */
6915 dev->min_mtu = ETH_ZLEN;
6916 dev->max_mtu = 9500;
6917
Michael Chan7df4ae92016-12-02 21:17:17 -05006918 bnxt_dcb_init(bp);
6919
Michael Chanc0c050c2015-10-22 16:01:17 -04006920#ifdef CONFIG_BNXT_SRIOV
6921 init_waitqueue_head(&bp->sriov_cfg_wait);
6922#endif
Michael Chan309369c2016-06-13 02:25:34 -04006923 bp->gro_func = bnxt_gro_func_5730x;
Michael Chan94758f82016-06-13 02:25:35 -04006924 if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
6925 bp->gro_func = bnxt_gro_func_5731x;
Michael Chan309369c2016-06-13 02:25:34 -04006926
Michael Chanc0c050c2015-10-22 16:01:17 -04006927 rc = bnxt_hwrm_func_drv_rgtr(bp);
6928 if (rc)
6929 goto init_err;
6930
6931 /* Get the MAX capabilities for this function */
6932 rc = bnxt_hwrm_func_qcaps(bp);
6933 if (rc) {
6934 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
6935 rc);
6936 rc = -1;
6937 goto init_err;
6938 }
6939
6940 rc = bnxt_hwrm_queue_qportcfg(bp);
6941 if (rc) {
6942 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
6943 rc);
6944 rc = -1;
6945 goto init_err;
6946 }
6947
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04006948 bnxt_hwrm_func_qcfg(bp);
6949
Michael Chanc0c050c2015-10-22 16:01:17 -04006950 bnxt_set_tpa_flags(bp);
6951 bnxt_set_ring_params(bp);
Jeffrey Huangbdd43472015-12-02 01:54:07 -05006952 if (BNXT_PF(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04006953 bp->pf.max_irqs = max_irqs;
Michael Chan379a80a2015-10-23 15:06:19 -04006954#if defined(CONFIG_BNXT_SRIOV)
Jeffrey Huangbdd43472015-12-02 01:54:07 -05006955 else
Michael Chanc0c050c2015-10-22 16:01:17 -04006956 bp->vf.max_irqs = max_irqs;
Michael Chan379a80a2015-10-23 15:06:19 -04006957#endif
Michael Chan6e6c5a52016-01-02 23:45:02 -05006958 bnxt_set_dflt_rings(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006959
Michael Chan87da7f72016-11-16 21:13:09 -05006960 /* Default RSS hash cfg. */
6961 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
6962 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
6963 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
6964 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
6965 if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
6966 !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6967 bp->hwrm_spec_code >= 0x10501) {
6968 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
6969 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
6970 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
6971 }
6972
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006973 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006974 dev->hw_features |= NETIF_F_NTUPLE;
6975 if (bnxt_rfs_capable(bp)) {
6976 bp->flags |= BNXT_FLAG_RFS;
6977 dev->features |= NETIF_F_NTUPLE;
6978 }
6979 }
6980
Michael Chanc0c050c2015-10-22 16:01:17 -04006981 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
6982 bp->flags |= BNXT_FLAG_STRIP_VLAN;
6983
6984 rc = bnxt_probe_phy(bp);
6985 if (rc)
6986 goto init_err;
6987
6988 rc = register_netdev(dev);
6989 if (rc)
6990 goto init_err;
6991
6992 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
6993 board_info[ent->driver_data].name,
6994 (long)pci_resource_start(pdev, 0), dev->dev_addr);
6995
Ajit Khaparde90c4f782016-05-15 03:04:45 -04006996 bnxt_parse_log_pcie_link(bp);
6997
Michael Chanc0c050c2015-10-22 16:01:17 -04006998 return 0;
6999
7000init_err:
7001 pci_iounmap(pdev, bp->bar0);
7002 pci_release_regions(pdev);
7003 pci_disable_device(pdev);
7004
7005init_err_free:
7006 free_netdev(dev);
7007 return rc;
7008}
7009
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007010/**
7011 * bnxt_io_error_detected - called when PCI error is detected
7012 * @pdev: Pointer to PCI device
7013 * @state: The current pci connection state
7014 *
7015 * This function is called after a PCI bus error affecting
7016 * this device has been detected.
7017 */
7018static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7019 pci_channel_state_t state)
7020{
7021 struct net_device *netdev = pci_get_drvdata(pdev);
Michael Chan2a5bedf2016-07-01 18:46:21 -04007022 struct bnxt *bp = netdev_priv(netdev);
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007023
7024 netdev_info(netdev, "PCI I/O error detected\n");
7025
7026 rtnl_lock();
7027 netif_device_detach(netdev);
7028
7029 if (state == pci_channel_io_perm_failure) {
7030 rtnl_unlock();
7031 return PCI_ERS_RESULT_DISCONNECT;
7032 }
7033
7034 if (netif_running(netdev))
7035 bnxt_close(netdev);
7036
Michael Chan2a5bedf2016-07-01 18:46:21 -04007037 /* So that func_reset will be done during slot_reset */
7038 clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007039 pci_disable_device(pdev);
7040 rtnl_unlock();
7041
7042 	/* Request a slot reset. */
7043 return PCI_ERS_RESULT_NEED_RESET;
7044}
7045
7046/**
7047 * bnxt_io_slot_reset - called after the pci bus has been reset.
7048 * @pdev: Pointer to PCI device
7049 *
7050  * Restart the card from scratch, as if from a cold boot.
7051  * At this point, the card has experienced a hard reset,
7052 * followed by fixups by BIOS, and has its config space
7053 * set up identically to what it was at cold boot.
7054 */
7055static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
7056{
7057 struct net_device *netdev = pci_get_drvdata(pdev);
7058 struct bnxt *bp = netdev_priv(netdev);
7059 int err = 0;
7060 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
7061
7062 netdev_info(bp->dev, "PCI Slot Reset\n");
7063
7064 rtnl_lock();
7065
7066 if (pci_enable_device(pdev)) {
7067 dev_err(&pdev->dev,
7068 "Cannot re-enable PCI device after reset.\n");
7069 } else {
7070 pci_set_master(pdev);
7071
7072 if (netif_running(netdev))
7073 err = bnxt_open(netdev);
7074
7075 if (!err)
7076 result = PCI_ERS_RESULT_RECOVERED;
7077 }
7078
7079 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
7080 dev_close(netdev);
7081
7082 rtnl_unlock();
7083
7084 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7085 if (err) {
7086 dev_err(&pdev->dev,
7087 			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
7088 err); /* non-fatal, continue */
7089 }
7090
7091 	return result;
7092}
7093
7094/**
7095 * bnxt_io_resume - called when traffic can start flowing again.
7096 * @pdev: Pointer to PCI device
7097 *
7098 * This callback is called when the error recovery driver tells
7099  * us that it's OK to resume normal operation.
7100 */
7101static void bnxt_io_resume(struct pci_dev *pdev)
7102{
7103 struct net_device *netdev = pci_get_drvdata(pdev);
7104
7105 rtnl_lock();
7106
7107 netif_device_attach(netdev);
7108
7109 rtnl_unlock();
7110}
7111
7112static const struct pci_error_handlers bnxt_err_handler = {
7113 .error_detected = bnxt_io_error_detected,
7114 .slot_reset = bnxt_io_slot_reset,
7115 .resume = bnxt_io_resume
7116};
7117
Michael Chanc0c050c2015-10-22 16:01:17 -04007118static struct pci_driver bnxt_pci_driver = {
7119 .name = DRV_MODULE_NAME,
7120 .id_table = bnxt_pci_tbl,
7121 .probe = bnxt_init_one,
7122 .remove = bnxt_remove_one,
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007123 .err_handler = &bnxt_err_handler,
Michael Chanc0c050c2015-10-22 16:01:17 -04007124#if defined(CONFIG_BNXT_SRIOV)
7125 .sriov_configure = bnxt_sriov_configure,
7126#endif
7127};
7128
7129module_pci_driver(bnxt_pci_driver);