/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

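/* Number of TX descriptors still available on @txr.  The barrier in the
 * body forces tx_prod and tx_cons to be re-read from memory, since this
 * is called lock-free from both the xmit and the completion paths.
 */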
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

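/* Main transmit entry point.  Small packets that fit within
 * bp->tx_push_thresh are written inline through the doorbell BAR
 * ("TX push") when the ring is otherwise empty; everything else takes
 * the normal DMA-mapped long-BD path.  The TX doorbell is written
 * twice at the end of the normal path; this appears deliberate
 * (assumed to be a hardware workaround; not documented here).
 */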
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

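/* TX completion processing: unmap and free up to @nr_pkts completed
 * packets, advance tx_cons, and re-wake the queue if it was stopped
 * and enough descriptors have been reclaimed.
 */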
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

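/* Allocate and DMA-map one RX buffer.  On success, returns the
 * kmalloc'ed buffer and stores the bus address in *@mapping; returns
 * NULL if allocation or mapping fails.  bnxt_alloc_rx_data() below
 * wires such a buffer into the RX descriptor ring at @prod.
 */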
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

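/* Allocate one aggregation-ring buffer.  When PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE, a single page is carved into multiple aggregation
 * buffers and reference-counted with get_page(); otherwise a full page
 * is used per buffer.
 */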
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

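/* Recycle @agg_bufs aggregation buffers named by the completion ring
 * entries starting at @cp_cons back onto the aggregation ring, e.g.
 * after an error or an aborted TPA session.
 */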
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

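/* Attach @agg_bufs aggregation pages from the completion ring to @skb
 * as page fragments.  If a replacement page cannot be allocated, the
 * skb is dropped and all remaining buffers are recycled.
 */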
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

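/* Consume and discard an RX or TPA-end completion, advancing *@raw_cons
 * past any aggregation completions that belong to it.  Returns -EBUSY
 * if those aggregation entries have not all arrived yet.
 */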
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}

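/* Begin a TPA (hardware receive coalescing) aggregation: park the
 * current RX buffer in rxr->rx_tpa[agg_id] and record the hash, GSO
 * type and metadata from the TPA_START completion.  An out-of-order
 * opaque index indicates a producer/consumer mismatch and schedules
 * a reset.
 */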
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

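/* Fix up the network/transport headers and TCP pseudo checksum of an
 * aggregated packet on 5731x chips, using the header offsets supplied
 * in the TPA_START completion.  Internal loopback packets carry an
 * extra 4 bytes, which is detected and compensated for below.
 */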
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

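/* Same header fixup for 5730x chips, which do not report header
 * offsets; they are derived from the payload offset, assuming fixed
 * IPv4/IPv6 + TCP header sizes and a 12-byte timestamp option when
 * tcp_ts is set.
 */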
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

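/* Complete a TPA aggregation: build the skb from the buffer parked at
 * TPA_START, attach any aggregation pages, apply VLAN and checksum
 * state, and hand the result to the GRO fixup.  Returns an ERR_PTR
 * when the completion ring does not yet hold all aggregation entries.
 */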
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

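/* Handle a firmware async event completion by setting the matching
 * sp_event bit and kicking the slow-path workqueue.
 */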
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

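/* Dispatch an HWRM completion: match DONE completions against the
 * outstanding sequence id, validate and queue forwarded VF requests,
 * and route async event completions to bnxt_async_event_process().
 */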
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

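/* MSI-X interrupt handler: one vector per completion ring, so it only
 * needs to prefetch the next completion entry and schedule NAPI.
 * bnxt_inta() below is the legacy INTx handler, which must also check
 * whether the interrupt was really ours and whether it is shared.
 */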
1572static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1573{
1574 struct bnxt_napi *bnapi = dev_instance;
1575 struct bnxt *bp = bnapi->bp;
1576 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1577 u32 cons = RING_CMP(cpr->cp_raw_cons);
1578
1579 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1580 napi_schedule(&bnapi->napi);
1581 return IRQ_HANDLED;
1582}
1583
1584static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1585{
1586 u32 raw_cons = cpr->cp_raw_cons;
1587 u16 cons = RING_CMP(raw_cons);
1588 struct tx_cmp *txcmp;
1589
1590 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1591
1592 return TX_CMP_VALID(txcmp, raw_cons);
1593}
1594
Michael Chanc0c050c2015-10-22 16:01:17 -04001595static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1596{
1597 struct bnxt_napi *bnapi = dev_instance;
1598 struct bnxt *bp = bnapi->bp;
1599 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1600 u32 cons = RING_CMP(cpr->cp_raw_cons);
1601 u32 int_status;
1602
1603 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1604
1605 if (!bnxt_has_work(bp, cpr)) {
Jeffrey Huang11809492015-11-05 16:25:49 -05001606 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
Michael Chanc0c050c2015-10-22 16:01:17 -04001607 /* return if erroneous interrupt */
1608 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1609 return IRQ_NONE;
1610 }
1611
1612 /* disable ring IRQ */
1613 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1614
1615 /* Return here if interrupt is shared and is disabled. */
1616 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1617 return IRQ_HANDLED;
1618
1619 napi_schedule(&bnapi->napi);
1620 return IRQ_HANDLED;
1621}
1622
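/* Process up to @budget completion ring entries: count tx
 * completions, receive rx packets, and dispatch HWRM completions;
 * then ack the completion ring, free completed tx buffers, and post
 * the new rx/agg producer indices.
 */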
1623static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1624{
1625 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1626 u32 raw_cons = cpr->cp_raw_cons;
1627 u32 cons;
1628 int tx_pkts = 0;
1629 int rx_pkts = 0;
1630 bool rx_event = false;
1631 bool agg_event = false;
1632 struct tx_cmp *txcmp;
1633
1634 while (1) {
1635 int rc;
1636
1637 cons = RING_CMP(raw_cons);
1638 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1639
1640 if (!TX_CMP_VALID(txcmp, raw_cons))
1641 break;
1642
Michael Chan67a95e22016-05-04 16:56:43 -04001643 /* The validity of the entry must be checked before reading
1644 * any further.
1645 */
Michael Chanb67daab2016-05-15 03:04:51 -04001646 dma_rmb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001647 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1648 tx_pkts++;
1649 /* Return the full budget so the poll loop in bnxt_poll() exits and NAPI polls again. */
1650 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1651 rx_pkts = budget;
1652 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1653 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1654 if (likely(rc >= 0))
1655 rx_pkts += rc;
1656 else if (rc == -EBUSY) /* partial completion */
1657 break;
1658 rx_event = true;
1659 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1660 CMPL_BASE_TYPE_HWRM_DONE) ||
1661 (TX_CMP_TYPE(txcmp) ==
1662 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1663 (TX_CMP_TYPE(txcmp) ==
1664 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1665 bnxt_hwrm_handler(bp, txcmp);
1666 }
1667 raw_cons = NEXT_RAW_CMP(raw_cons);
1668
1669 if (rx_pkts == budget)
1670 break;
1671 }
1672
1673 cpr->cp_raw_cons = raw_cons;
1674 /* ACK completion ring before freeing tx ring and producing new
1675 * buffers in rx/agg rings to prevent overflowing the completion
1676 * ring.
1677 */
1678 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1679
1680 if (tx_pkts)
1681 bnxt_tx_int(bp, bnapi, tx_pkts);
1682
1683 if (rx_event) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001684 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001685
1686 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1687 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1688 if (agg_event) {
1689 writel(DB_KEY_RX | rxr->rx_agg_prod,
1690 rxr->rx_agg_doorbell);
1691 writel(DB_KEY_RX | rxr->rx_agg_prod,
1692 rxr->rx_agg_doorbell);
1693 }
1694 }
1695 return rx_pkts;
1696}
1697
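/* NAPI poll handler for the special completion ring on Nitro A0.
 * Received packets are not passed up the stack; a CRC error is
 * forced so that bnxt_rx_pkt() recycles the buffer.
 */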
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001698static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1699{
1700 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1701 struct bnxt *bp = bnapi->bp;
1702 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1703 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1704 struct tx_cmp *txcmp;
1705 struct rx_cmp_ext *rxcmp1;
1706 u32 cp_cons, tmp_raw_cons;
1707 u32 raw_cons = cpr->cp_raw_cons;
1708 u32 rx_pkts = 0;
1709 bool agg_event = false;
1710
1711 while (1) {
1712 int rc;
1713
1714 cp_cons = RING_CMP(raw_cons);
1715 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1716
1717 if (!TX_CMP_VALID(txcmp, raw_cons))
1718 break;
1719
1720 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1721 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1722 cp_cons = RING_CMP(tmp_raw_cons);
1723 rxcmp1 = (struct rx_cmp_ext *)
1724 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1725
1726 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1727 break;
1728
1729 /* force an error to recycle the buffer */
1730 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1731 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1732
1733 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1734 if (likely(rc == -EIO))
1735 rx_pkts++;
1736 else if (rc == -EBUSY) /* partial completion */
1737 break;
1738 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1739 CMPL_BASE_TYPE_HWRM_DONE)) {
1740 bnxt_hwrm_handler(bp, txcmp);
1741 } else {
1742 netdev_err(bp->dev,
1743 "Invalid completion received on special ring\n");
1744 }
1745 raw_cons = NEXT_RAW_CMP(raw_cons);
1746
1747 if (rx_pkts == budget)
1748 break;
1749 }
1750
1751 cpr->cp_raw_cons = raw_cons;
1752 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1753 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1754 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1755
1756 if (agg_event) {
1757 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1758 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1759 }
1760
1761 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1762 napi_complete(napi);
1763 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1764 }
1765 return rx_pkts;
1766}
1767
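/* Main NAPI poll handler: loop in bnxt_poll_work() until the budget
 * is exhausted or no work remains, then complete NAPI and re-arm
 * the completion ring interrupt.
 */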
Michael Chanc0c050c2015-10-22 16:01:17 -04001768static int bnxt_poll(struct napi_struct *napi, int budget)
1769{
1770 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1771 struct bnxt *bp = bnapi->bp;
1772 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1773 int work_done = 0;
1774
Michael Chanc0c050c2015-10-22 16:01:17 -04001775 while (1) {
1776 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1777
1778 if (work_done >= budget)
1779 break;
1780
1781 if (!bnxt_has_work(bp, cpr)) {
Michael Chane7b95692016-12-29 12:13:32 -05001782 if (napi_complete_done(napi, work_done))
1783 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1784 cpr->cp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04001785 break;
1786 }
1787 }
1788 mmiowb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001789 return work_done;
1790}
1791
Michael Chanc0c050c2015-10-22 16:01:17 -04001792static void bnxt_free_tx_skbs(struct bnxt *bp)
1793{
1794 int i, max_idx;
1795 struct pci_dev *pdev = bp->pdev;
1796
Michael Chanb6ab4b02016-01-02 23:44:59 -05001797 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001798 return;
1799
1800 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1801 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001802 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001803 int j;
1804
Michael Chanc0c050c2015-10-22 16:01:17 -04001805 for (j = 0; j < max_idx;) {
1806 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1807 struct sk_buff *skb = tx_buf->skb;
1808 int k, last;
1809
1810 if (!skb) {
1811 j++;
1812 continue;
1813 }
1814
1815 tx_buf->skb = NULL;
1816
1817 if (tx_buf->is_push) {
1818 dev_kfree_skb(skb);
1819 j += 2;
1820 continue;
1821 }
1822
1823 dma_unmap_single(&pdev->dev,
1824 dma_unmap_addr(tx_buf, mapping),
1825 skb_headlen(skb),
1826 PCI_DMA_TODEVICE);
1827
1828 last = tx_buf->nr_frags;
1829 j += 2;
Michael Chand612a572016-01-28 03:11:22 -05001830 for (k = 0; k < last; k++, j++) {
1831 int ring_idx = j & bp->tx_ring_mask;
Michael Chanc0c050c2015-10-22 16:01:17 -04001832 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1833
Michael Chand612a572016-01-28 03:11:22 -05001834 tx_buf = &txr->tx_buf_ring[ring_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04001835 dma_unmap_page(
1836 &pdev->dev,
1837 dma_unmap_addr(tx_buf, mapping),
1838 skb_frag_size(frag), PCI_DMA_TODEVICE);
1839 }
1840 dev_kfree_skb(skb);
1841 }
1842 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1843 }
1844}
1845
1846static void bnxt_free_rx_skbs(struct bnxt *bp)
1847{
1848 int i, max_idx, max_agg_idx;
1849 struct pci_dev *pdev = bp->pdev;
1850
Michael Chanb6ab4b02016-01-02 23:44:59 -05001851 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001852 return;
1853
1854 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1855 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1856 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001857 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001858 int j;
1859
Michael Chanc0c050c2015-10-22 16:01:17 -04001860 if (rxr->rx_tpa) {
1861 for (j = 0; j < MAX_TPA; j++) {
1862 struct bnxt_tpa_info *tpa_info =
1863 &rxr->rx_tpa[j];
1864 u8 *data = tpa_info->data;
1865
1866 if (!data)
1867 continue;
1868
1869 dma_unmap_single(
1870 &pdev->dev,
1871 dma_unmap_addr(tpa_info, mapping),
1872 bp->rx_buf_use_size,
1873 PCI_DMA_FROMDEVICE);
1874
1875 tpa_info->data = NULL;
1876
1877 kfree(data);
1878 }
1879 }
1880
1881 for (j = 0; j < max_idx; j++) {
1882 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1883 u8 *data = rx_buf->data;
1884
1885 if (!data)
1886 continue;
1887
1888 dma_unmap_single(&pdev->dev,
1889 dma_unmap_addr(rx_buf, mapping),
1890 bp->rx_buf_use_size,
1891 PCI_DMA_FROMDEVICE);
1892
1893 rx_buf->data = NULL;
1894
1895 kfree(data);
1896 }
1897
1898 for (j = 0; j < max_agg_idx; j++) {
1899 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1900 &rxr->rx_agg_ring[j];
1901 struct page *page = rx_agg_buf->page;
1902
1903 if (!page)
1904 continue;
1905
1906 dma_unmap_page(&pdev->dev,
1907 dma_unmap_addr(rx_agg_buf, mapping),
Michael Chan2839f282016-04-25 02:30:50 -04001908 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
Michael Chanc0c050c2015-10-22 16:01:17 -04001909
1910 rx_agg_buf->page = NULL;
1911 __clear_bit(j, rxr->rx_agg_bmap);
1912
1913 __free_page(page);
1914 }
Michael Chan89d0a062016-04-25 02:30:51 -04001915 if (rxr->rx_page) {
1916 __free_page(rxr->rx_page);
1917 rxr->rx_page = NULL;
1918 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001919 }
1920}
1921
1922static void bnxt_free_skbs(struct bnxt *bp)
1923{
1924 bnxt_free_tx_skbs(bp);
1925 bnxt_free_rx_skbs(bp);
1926}
1927
1928static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1929{
1930 struct pci_dev *pdev = bp->pdev;
1931 int i;
1932
1933 for (i = 0; i < ring->nr_pages; i++) {
1934 if (!ring->pg_arr[i])
1935 continue;
1936
1937 dma_free_coherent(&pdev->dev, ring->page_size,
1938 ring->pg_arr[i], ring->dma_arr[i]);
1939
1940 ring->pg_arr[i] = NULL;
1941 }
1942 if (ring->pg_tbl) {
1943 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1944 ring->pg_tbl, ring->pg_tbl_map);
1945 ring->pg_tbl = NULL;
1946 }
1947 if (ring->vmem_size && *ring->vmem) {
1948 vfree(*ring->vmem);
1949 *ring->vmem = NULL;
1950 }
1951}
1952
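/* Allocate the DMA-coherent pages backing a hardware ring, the page
 * table used when the ring spans multiple pages, and the optional
 * vmalloc'ed software ring state (vmem).
 */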
1953static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1954{
1955 int i;
1956 struct pci_dev *pdev = bp->pdev;
1957
1958 if (ring->nr_pages > 1) {
1959 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1960 ring->nr_pages * 8,
1961 &ring->pg_tbl_map,
1962 GFP_KERNEL);
1963 if (!ring->pg_tbl)
1964 return -ENOMEM;
1965 }
1966
1967 for (i = 0; i < ring->nr_pages; i++) {
1968 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1969 ring->page_size,
1970 &ring->dma_arr[i],
1971 GFP_KERNEL);
1972 if (!ring->pg_arr[i])
1973 return -ENOMEM;
1974
1975 if (ring->nr_pages > 1)
1976 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1977 }
1978
1979 if (ring->vmem_size) {
1980 *ring->vmem = vzalloc(ring->vmem_size);
1981 if (!(*ring->vmem))
1982 return -ENOMEM;
1983 }
1984 return 0;
1985}
1986
1987static void bnxt_free_rx_rings(struct bnxt *bp)
1988{
1989 int i;
1990
Michael Chanb6ab4b02016-01-02 23:44:59 -05001991 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001992 return;
1993
1994 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001995 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001996 struct bnxt_ring_struct *ring;
1997
Michael Chanc0c050c2015-10-22 16:01:17 -04001998 kfree(rxr->rx_tpa);
1999 rxr->rx_tpa = NULL;
2000
2001 kfree(rxr->rx_agg_bmap);
2002 rxr->rx_agg_bmap = NULL;
2003
2004 ring = &rxr->rx_ring_struct;
2005 bnxt_free_ring(bp, ring);
2006
2007 ring = &rxr->rx_agg_ring_struct;
2008 bnxt_free_ring(bp, ring);
2009 }
2010}
2011
2012static int bnxt_alloc_rx_rings(struct bnxt *bp)
2013{
2014 int i, rc, agg_rings = 0, tpa_rings = 0;
2015
Michael Chanb6ab4b02016-01-02 23:44:59 -05002016 if (!bp->rx_ring)
2017 return -ENOMEM;
2018
Michael Chanc0c050c2015-10-22 16:01:17 -04002019 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2020 agg_rings = 1;
2021
2022 if (bp->flags & BNXT_FLAG_TPA)
2023 tpa_rings = 1;
2024
2025 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002026 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002027 struct bnxt_ring_struct *ring;
2028
Michael Chanc0c050c2015-10-22 16:01:17 -04002029 ring = &rxr->rx_ring_struct;
2030
2031 rc = bnxt_alloc_ring(bp, ring);
2032 if (rc)
2033 return rc;
2034
2035 if (agg_rings) {
2036 u16 mem_size;
2037
2038 ring = &rxr->rx_agg_ring_struct;
2039 rc = bnxt_alloc_ring(bp, ring);
2040 if (rc)
2041 return rc;
2042
2043 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2044 mem_size = rxr->rx_agg_bmap_size / 8;
2045 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2046 if (!rxr->rx_agg_bmap)
2047 return -ENOMEM;
2048
2049 if (tpa_rings) {
2050 rxr->rx_tpa = kcalloc(MAX_TPA,
2051 sizeof(struct bnxt_tpa_info),
2052 GFP_KERNEL);
2053 if (!rxr->rx_tpa)
2054 return -ENOMEM;
2055 }
2056 }
2057 }
2058 return 0;
2059}
2060
2061static void bnxt_free_tx_rings(struct bnxt *bp)
2062{
2063 int i;
2064 struct pci_dev *pdev = bp->pdev;
2065
Michael Chanb6ab4b02016-01-02 23:44:59 -05002066 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002067 return;
2068
2069 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002070 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002071 struct bnxt_ring_struct *ring;
2072
Michael Chanc0c050c2015-10-22 16:01:17 -04002073 if (txr->tx_push) {
2074 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2075 txr->tx_push, txr->tx_push_mapping);
2076 txr->tx_push = NULL;
2077 }
2078
2079 ring = &txr->tx_ring_struct;
2080
2081 bnxt_free_ring(bp, ring);
2082 }
2083}
2084
2085static int bnxt_alloc_tx_rings(struct bnxt *bp)
2086{
2087 int i, j, rc;
2088 struct pci_dev *pdev = bp->pdev;
2089
2090 bp->tx_push_size = 0;
2091 if (bp->tx_push_thresh) {
2092 int push_size;
2093
2094 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2095 bp->tx_push_thresh);
2096
Michael Chan4419dbe2016-02-10 17:33:49 -05002097 if (push_size > 256) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002098 push_size = 0;
2099 bp->tx_push_thresh = 0;
2100 }
2101
2102 bp->tx_push_size = push_size;
2103 }
2104
2105 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002106 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002107 struct bnxt_ring_struct *ring;
2108
Michael Chanc0c050c2015-10-22 16:01:17 -04002109 ring = &txr->tx_ring_struct;
2110
2111 rc = bnxt_alloc_ring(bp, ring);
2112 if (rc)
2113 return rc;
2114
2115 if (bp->tx_push_size) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002116 dma_addr_t mapping;
2117
2118 /* One pre-allocated DMA buffer to back up
2119 * the TX push operation
2120 */
2121 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2122 bp->tx_push_size,
2123 &txr->tx_push_mapping,
2124 GFP_KERNEL);
2125
2126 if (!txr->tx_push)
2127 return -ENOMEM;
2128
Michael Chanc0c050c2015-10-22 16:01:17 -04002129 mapping = txr->tx_push_mapping +
2130 sizeof(struct tx_push_bd);
Michael Chan4419dbe2016-02-10 17:33:49 -05002131 txr->data_mapping = cpu_to_le64(mapping);
Michael Chanc0c050c2015-10-22 16:01:17 -04002132
Michael Chan4419dbe2016-02-10 17:33:49 -05002133 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
Michael Chanc0c050c2015-10-22 16:01:17 -04002134 }
2135 ring->queue_id = bp->q_info[j].queue_id;
2136 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2137 j++;
2138 }
2139 return 0;
2140}
2141
2142static void bnxt_free_cp_rings(struct bnxt *bp)
2143{
2144 int i;
2145
2146 if (!bp->bnapi)
2147 return;
2148
2149 for (i = 0; i < bp->cp_nr_rings; i++) {
2150 struct bnxt_napi *bnapi = bp->bnapi[i];
2151 struct bnxt_cp_ring_info *cpr;
2152 struct bnxt_ring_struct *ring;
2153
2154 if (!bnapi)
2155 continue;
2156
2157 cpr = &bnapi->cp_ring;
2158 ring = &cpr->cp_ring_struct;
2159
2160 bnxt_free_ring(bp, ring);
2161 }
2162}
2163
2164static int bnxt_alloc_cp_rings(struct bnxt *bp)
2165{
2166 int i, rc;
2167
2168 for (i = 0; i < bp->cp_nr_rings; i++) {
2169 struct bnxt_napi *bnapi = bp->bnapi[i];
2170 struct bnxt_cp_ring_info *cpr;
2171 struct bnxt_ring_struct *ring;
2172
2173 if (!bnapi)
2174 continue;
2175
2176 cpr = &bnapi->cp_ring;
2177 ring = &cpr->cp_ring_struct;
2178
2179 rc = bnxt_alloc_ring(bp, ring);
2180 if (rc)
2181 return rc;
2182 }
2183 return 0;
2184}
2185
2186static void bnxt_init_ring_struct(struct bnxt *bp)
2187{
2188 int i;
2189
2190 for (i = 0; i < bp->cp_nr_rings; i++) {
2191 struct bnxt_napi *bnapi = bp->bnapi[i];
2192 struct bnxt_cp_ring_info *cpr;
2193 struct bnxt_rx_ring_info *rxr;
2194 struct bnxt_tx_ring_info *txr;
2195 struct bnxt_ring_struct *ring;
2196
2197 if (!bnapi)
2198 continue;
2199
2200 cpr = &bnapi->cp_ring;
2201 ring = &cpr->cp_ring_struct;
2202 ring->nr_pages = bp->cp_nr_pages;
2203 ring->page_size = HW_CMPD_RING_SIZE;
2204 ring->pg_arr = (void **)cpr->cp_desc_ring;
2205 ring->dma_arr = cpr->cp_desc_mapping;
2206 ring->vmem_size = 0;
2207
Michael Chanb6ab4b02016-01-02 23:44:59 -05002208 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002209 if (!rxr)
2210 goto skip_rx;
2211
Michael Chanc0c050c2015-10-22 16:01:17 -04002212 ring = &rxr->rx_ring_struct;
2213 ring->nr_pages = bp->rx_nr_pages;
2214 ring->page_size = HW_RXBD_RING_SIZE;
2215 ring->pg_arr = (void **)rxr->rx_desc_ring;
2216 ring->dma_arr = rxr->rx_desc_mapping;
2217 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2218 ring->vmem = (void **)&rxr->rx_buf_ring;
2219
2220 ring = &rxr->rx_agg_ring_struct;
2221 ring->nr_pages = bp->rx_agg_nr_pages;
2222 ring->page_size = HW_RXBD_RING_SIZE;
2223 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2224 ring->dma_arr = rxr->rx_agg_desc_mapping;
2225 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2226 ring->vmem = (void **)&rxr->rx_agg_ring;
2227
Michael Chan3b2b7d92016-01-02 23:45:00 -05002228skip_rx:
Michael Chanb6ab4b02016-01-02 23:44:59 -05002229 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002230 if (!txr)
2231 continue;
2232
Michael Chanc0c050c2015-10-22 16:01:17 -04002233 ring = &txr->tx_ring_struct;
2234 ring->nr_pages = bp->tx_nr_pages;
2235 ring->page_size = HW_RXBD_RING_SIZE;
2236 ring->pg_arr = (void **)txr->tx_desc_ring;
2237 ring->dma_arr = txr->tx_desc_mapping;
2238 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2239 ring->vmem = (void **)&txr->tx_buf_ring;
2240 }
2241}
2242
2243static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2244{
2245 int i;
2246 u32 prod;
2247 struct rx_bd **rx_buf_ring;
2248
2249 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2250 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2251 int j;
2252 struct rx_bd *rxbd;
2253
2254 rxbd = rx_buf_ring[i];
2255 if (!rxbd)
2256 continue;
2257
2258 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2259 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2260 rxbd->rx_bd_opaque = prod;
2261 }
2262 }
2263}
2264
2265static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2266{
2267 struct net_device *dev = bp->dev;
Michael Chanc0c050c2015-10-22 16:01:17 -04002268 struct bnxt_rx_ring_info *rxr;
2269 struct bnxt_ring_struct *ring;
2270 u32 prod, type;
2271 int i;
2272
Michael Chanc0c050c2015-10-22 16:01:17 -04002273 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2274 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2275
2276 if (NET_IP_ALIGN == 2)
2277 type |= RX_BD_FLAGS_SOP;
2278
Michael Chanb6ab4b02016-01-02 23:44:59 -05002279 rxr = &bp->rx_ring[ring_nr];
Michael Chanc0c050c2015-10-22 16:01:17 -04002280 ring = &rxr->rx_ring_struct;
2281 bnxt_init_rxbd_pages(ring, type);
2282
2283 prod = rxr->rx_prod;
2284 for (i = 0; i < bp->rx_ring_size; i++) {
2285 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2286 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2287 ring_nr, i, bp->rx_ring_size);
2288 break;
2289 }
2290 prod = NEXT_RX(prod);
2291 }
2292 rxr->rx_prod = prod;
2293 ring->fw_ring_id = INVALID_HW_RING_ID;
2294
Michael Chanedd0c2c2015-12-27 18:19:19 -05002295 ring = &rxr->rx_agg_ring_struct;
2296 ring->fw_ring_id = INVALID_HW_RING_ID;
2297
Michael Chanc0c050c2015-10-22 16:01:17 -04002298 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2299 return 0;
2300
Michael Chan2839f282016-04-25 02:30:50 -04002301 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
Michael Chanc0c050c2015-10-22 16:01:17 -04002302 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2303
2304 bnxt_init_rxbd_pages(ring, type);
2305
2306 prod = rxr->rx_agg_prod;
2307 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2308 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2309 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2310 ring_nr, i, bp->rx_agg_ring_size);
2311 break;
2312 }
2313 prod = NEXT_RX_AGG(prod);
2314 }
2315 rxr->rx_agg_prod = prod;
Michael Chanc0c050c2015-10-22 16:01:17 -04002316
2317 if (bp->flags & BNXT_FLAG_TPA) {
2318 if (rxr->rx_tpa) {
2319 u8 *data;
2320 dma_addr_t mapping;
2321
2322 for (i = 0; i < MAX_TPA; i++) {
2323 data = __bnxt_alloc_rx_data(bp, &mapping,
2324 GFP_KERNEL);
2325 if (!data)
2326 return -ENOMEM;
2327
2328 rxr->rx_tpa[i].data = data;
2329 rxr->rx_tpa[i].mapping = mapping;
2330 }
2331 } else {
2332 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2333 return -ENOMEM;
2334 }
2335 }
2336
2337 return 0;
2338}
2339
2340static int bnxt_init_rx_rings(struct bnxt *bp)
2341{
2342 int i, rc = 0;
2343
2344 for (i = 0; i < bp->rx_nr_rings; i++) {
2345 rc = bnxt_init_one_rx_ring(bp, i);
2346 if (rc)
2347 break;
2348 }
2349
2350 return rc;
2351}
2352
2353static int bnxt_init_tx_rings(struct bnxt *bp)
2354{
2355 u16 i;
2356
2357 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2358 MAX_SKB_FRAGS + 1);
2359
2360 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002361 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002362 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2363
2364 ring->fw_ring_id = INVALID_HW_RING_ID;
2365 }
2366
2367 return 0;
2368}
2369
2370static void bnxt_free_ring_grps(struct bnxt *bp)
2371{
2372 kfree(bp->grp_info);
2373 bp->grp_info = NULL;
2374}
2375
2376static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2377{
2378 int i;
2379
2380 if (irq_re_init) {
2381 bp->grp_info = kcalloc(bp->cp_nr_rings,
2382 sizeof(struct bnxt_ring_grp_info),
2383 GFP_KERNEL);
2384 if (!bp->grp_info)
2385 return -ENOMEM;
2386 }
2387 for (i = 0; i < bp->cp_nr_rings; i++) {
2388 if (irq_re_init)
2389 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2390 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2391 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2392 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2393 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2394 }
2395 return 0;
2396}
2397
2398static void bnxt_free_vnics(struct bnxt *bp)
2399{
2400 kfree(bp->vnic_info);
2401 bp->vnic_info = NULL;
2402 bp->nr_vnics = 0;
2403}
2404
2405static int bnxt_alloc_vnics(struct bnxt *bp)
2406{
2407 int num_vnics = 1;
2408
2409#ifdef CONFIG_RFS_ACCEL
2410 if (bp->flags & BNXT_FLAG_RFS)
2411 num_vnics += bp->rx_nr_rings;
2412#endif
2413
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04002414 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2415 num_vnics++;
2416
Michael Chanc0c050c2015-10-22 16:01:17 -04002417 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2418 GFP_KERNEL);
2419 if (!bp->vnic_info)
2420 return -ENOMEM;
2421
2422 bp->nr_vnics = num_vnics;
2423 return 0;
2424}
2425
2426static void bnxt_init_vnics(struct bnxt *bp)
2427{
2428 int i;
2429
2430 for (i = 0; i < bp->nr_vnics; i++) {
2431 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2432
2433 vnic->fw_vnic_id = INVALID_HW_RING_ID;
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04002434 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2435 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04002436 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2437
2438 if (bp->vnic_info[i].rss_hash_key) {
2439 if (i == 0)
2440 prandom_bytes(vnic->rss_hash_key,
2441 HW_HASH_KEY_SIZE);
2442 else
2443 memcpy(vnic->rss_hash_key,
2444 bp->vnic_info[0].rss_hash_key,
2445 HW_HASH_KEY_SIZE);
2446 }
2447 }
2448}
2449
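/* Return the number of ring pages needed for @ring_size descriptors:
 * ring_size / desc_per_pg plus one, rounded up to a power of two.
 */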
2450static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2451{
2452 int pages;
2453
2454 pages = ring_size / desc_per_pg;
2455
2456 if (!pages)
2457 return 1;
2458
2459 pages++;
2460
2461 while (pages & (pages - 1))
2462 pages++;
2463
2464 return pages;
2465}
2466
2467static void bnxt_set_tpa_flags(struct bnxt *bp)
2468{
2469 bp->flags &= ~BNXT_FLAG_TPA;
2470 if (bp->dev->features & NETIF_F_LRO)
2471 bp->flags |= BNXT_FLAG_LRO;
Michael Chan94758f82016-06-13 02:25:35 -04002472 if (bp->dev->features & NETIF_F_GRO)
Michael Chanc0c050c2015-10-22 16:01:17 -04002473 bp->flags |= BNXT_FLAG_GRO;
2474}
2475
2476/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2477 * be set on entry.
2478 */
2479void bnxt_set_ring_params(struct bnxt *bp)
2480{
2481 u32 ring_size, rx_size, rx_space;
2482 u32 agg_factor = 0, agg_ring_size = 0;
2483
2484 /* 8 for CRC and VLAN */
2485 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2486
2487 rx_space = rx_size + NET_SKB_PAD +
2488 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2489
2490 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2491 ring_size = bp->rx_ring_size;
2492 bp->rx_agg_ring_size = 0;
2493 bp->rx_agg_nr_pages = 0;
2494
2495 if (bp->flags & BNXT_FLAG_TPA)
Michael Chan2839f282016-04-25 02:30:50 -04002496 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
Michael Chanc0c050c2015-10-22 16:01:17 -04002497
2498 bp->flags &= ~BNXT_FLAG_JUMBO;
2499 if (rx_space > PAGE_SIZE) {
2500 u32 jumbo_factor;
2501
2502 bp->flags |= BNXT_FLAG_JUMBO;
2503 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2504 if (jumbo_factor > agg_factor)
2505 agg_factor = jumbo_factor;
2506 }
2507 agg_ring_size = ring_size * agg_factor;
2508
2509 if (agg_ring_size) {
2510 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2511 RX_DESC_CNT);
2512 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2513 u32 tmp = agg_ring_size;
2514
2515 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2516 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2517 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2518 tmp, agg_ring_size);
2519 }
2520 bp->rx_agg_ring_size = agg_ring_size;
2521 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2522 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2523 rx_space = rx_size + NET_SKB_PAD +
2524 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2525 }
2526
2527 bp->rx_buf_use_size = rx_size;
2528 bp->rx_buf_size = rx_space;
2529
2530 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2531 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2532
2533 ring_size = bp->tx_ring_size;
2534 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2535 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2536
2537 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2538 bp->cp_ring_size = ring_size;
2539
2540 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2541 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2542 bp->cp_nr_pages = MAX_CP_PAGES;
2543 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2544 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2545 ring_size, bp->cp_ring_size);
2546 }
2547 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2548 bp->cp_ring_mask = bp->cp_bit - 1;
2549}
2550
2551static void bnxt_free_vnic_attributes(struct bnxt *bp)
2552{
2553 int i;
2554 struct bnxt_vnic_info *vnic;
2555 struct pci_dev *pdev = bp->pdev;
2556
2557 if (!bp->vnic_info)
2558 return;
2559
2560 for (i = 0; i < bp->nr_vnics; i++) {
2561 vnic = &bp->vnic_info[i];
2562
2563 kfree(vnic->fw_grp_ids);
2564 vnic->fw_grp_ids = NULL;
2565
2566 kfree(vnic->uc_list);
2567 vnic->uc_list = NULL;
2568
2569 if (vnic->mc_list) {
2570 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2571 vnic->mc_list, vnic->mc_list_mapping);
2572 vnic->mc_list = NULL;
2573 }
2574
2575 if (vnic->rss_table) {
2576 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2577 vnic->rss_table,
2578 vnic->rss_table_dma_addr);
2579 vnic->rss_table = NULL;
2580 }
2581
2582 vnic->rss_hash_key = NULL;
2583 vnic->flags = 0;
2584 }
2585}
2586
2587static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2588{
2589 int i, rc = 0, size;
2590 struct bnxt_vnic_info *vnic;
2591 struct pci_dev *pdev = bp->pdev;
2592 int max_rings;
2593
2594 for (i = 0; i < bp->nr_vnics; i++) {
2595 vnic = &bp->vnic_info[i];
2596
2597 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2598 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2599
2600 if (mem_size > 0) {
2601 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2602 if (!vnic->uc_list) {
2603 rc = -ENOMEM;
2604 goto out;
2605 }
2606 }
2607 }
2608
2609 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2610 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2611 vnic->mc_list =
2612 dma_alloc_coherent(&pdev->dev,
2613 vnic->mc_list_size,
2614 &vnic->mc_list_mapping,
2615 GFP_KERNEL);
2616 if (!vnic->mc_list) {
2617 rc = -ENOMEM;
2618 goto out;
2619 }
2620 }
2621
2622 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2623 max_rings = bp->rx_nr_rings;
2624 else
2625 max_rings = 1;
2626
2627 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2628 if (!vnic->fw_grp_ids) {
2629 rc = -ENOMEM;
2630 goto out;
2631 }
2632
Michael Chanae10ae72016-12-29 12:13:38 -05002633 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2634 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2635 continue;
2636
Michael Chanc0c050c2015-10-22 16:01:17 -04002637 /* Allocate rss table and hash key */
2638 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2639 &vnic->rss_table_dma_addr,
2640 GFP_KERNEL);
2641 if (!vnic->rss_table) {
2642 rc = -ENOMEM;
2643 goto out;
2644 }
2645
2646 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2647
2648 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2649 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2650 }
2651 return 0;
2652
2653out:
2654 return rc;
2655}
2656
2657static void bnxt_free_hwrm_resources(struct bnxt *bp)
2658{
2659 struct pci_dev *pdev = bp->pdev;
2660
2661 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2662 bp->hwrm_cmd_resp_dma_addr);
2663
2664 bp->hwrm_cmd_resp_addr = NULL;
2665 if (bp->hwrm_dbg_resp_addr) {
2666 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2667 bp->hwrm_dbg_resp_addr,
2668 bp->hwrm_dbg_resp_dma_addr);
2669
2670 bp->hwrm_dbg_resp_addr = NULL;
2671 }
2672}
2673
2674static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2675{
2676 struct pci_dev *pdev = bp->pdev;
2677
2678 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2679 &bp->hwrm_cmd_resp_dma_addr,
2680 GFP_KERNEL);
2681 if (!bp->hwrm_cmd_resp_addr)
2682 return -ENOMEM;
2683 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2684 HWRM_DBG_REG_BUF_SIZE,
2685 &bp->hwrm_dbg_resp_dma_addr,
2686 GFP_KERNEL);
2687 if (!bp->hwrm_dbg_resp_addr)
2688 netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2689
2690 return 0;
2691}
2692
2693static void bnxt_free_stats(struct bnxt *bp)
2694{
2695 u32 size, i;
2696 struct pci_dev *pdev = bp->pdev;
2697
Michael Chan3bdf56c2016-03-07 15:38:45 -05002698 if (bp->hw_rx_port_stats) {
2699 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2700 bp->hw_rx_port_stats,
2701 bp->hw_rx_port_stats_map);
2702 bp->hw_rx_port_stats = NULL;
2703 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2704 }
2705
Michael Chanc0c050c2015-10-22 16:01:17 -04002706 if (!bp->bnapi)
2707 return;
2708
2709 size = sizeof(struct ctx_hw_stats);
2710
2711 for (i = 0; i < bp->cp_nr_rings; i++) {
2712 struct bnxt_napi *bnapi = bp->bnapi[i];
2713 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2714
2715 if (cpr->hw_stats) {
2716 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2717 cpr->hw_stats_map);
2718 cpr->hw_stats = NULL;
2719 }
2720 }
2721}
2722
2723static int bnxt_alloc_stats(struct bnxt *bp)
2724{
2725 u32 size, i;
2726 struct pci_dev *pdev = bp->pdev;
2727
2728 size = sizeof(struct ctx_hw_stats);
2729
2730 for (i = 0; i < bp->cp_nr_rings; i++) {
2731 struct bnxt_napi *bnapi = bp->bnapi[i];
2732 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2733
2734 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2735 &cpr->hw_stats_map,
2736 GFP_KERNEL);
2737 if (!cpr->hw_stats)
2738 return -ENOMEM;
2739
2740 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2741 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05002742
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04002743 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05002744 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2745 sizeof(struct tx_port_stats) + 1024;
2746
2747 bp->hw_rx_port_stats =
2748 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2749 &bp->hw_rx_port_stats_map,
2750 GFP_KERNEL);
2751 if (!bp->hw_rx_port_stats)
2752 return -ENOMEM;
2753
2754 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2755 512;
2756 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2757 sizeof(struct rx_port_stats) + 512;
2758 bp->flags |= BNXT_FLAG_PORT_STATS;
2759 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002760 return 0;
2761}
2762
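/* Reset the software producer and consumer indices on all
 * completion, tx and rx rings.
 */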
2763static void bnxt_clear_ring_indices(struct bnxt *bp)
2764{
2765 int i;
2766
2767 if (!bp->bnapi)
2768 return;
2769
2770 for (i = 0; i < bp->cp_nr_rings; i++) {
2771 struct bnxt_napi *bnapi = bp->bnapi[i];
2772 struct bnxt_cp_ring_info *cpr;
2773 struct bnxt_rx_ring_info *rxr;
2774 struct bnxt_tx_ring_info *txr;
2775
2776 if (!bnapi)
2777 continue;
2778
2779 cpr = &bnapi->cp_ring;
2780 cpr->cp_raw_cons = 0;
2781
Michael Chanb6ab4b02016-01-02 23:44:59 -05002782 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002783 if (txr) {
2784 txr->tx_prod = 0;
2785 txr->tx_cons = 0;
2786 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002787
Michael Chanb6ab4b02016-01-02 23:44:59 -05002788 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002789 if (rxr) {
2790 rxr->rx_prod = 0;
2791 rxr->rx_agg_prod = 0;
2792 rxr->rx_sw_agg_prod = 0;
Michael Chan376a5b82016-05-10 19:17:59 -04002793 rxr->rx_next_cons = 0;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002794 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002795 }
2796}
2797
2798static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2799{
2800#ifdef CONFIG_RFS_ACCEL
2801 int i;
2802
2803 /* We are under rtnl_lock and all our NAPIs have been disabled,
2804 * so it is safe to delete the hash table.
2805 */
2806 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2807 struct hlist_head *head;
2808 struct hlist_node *tmp;
2809 struct bnxt_ntuple_filter *fltr;
2810
2811 head = &bp->ntp_fltr_hash_tbl[i];
2812 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2813 hlist_del(&fltr->hash);
2814 kfree(fltr);
2815 }
2816 }
2817 if (irq_reinit) {
2818 kfree(bp->ntp_fltr_bmap);
2819 bp->ntp_fltr_bmap = NULL;
2820 }
2821 bp->ntp_fltr_count = 0;
2822#endif
2823}
2824
2825static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2826{
2827#ifdef CONFIG_RFS_ACCEL
2828 int i, rc = 0;
2829
2830 if (!(bp->flags & BNXT_FLAG_RFS))
2831 return 0;
2832
2833 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2834 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2835
2836 bp->ntp_fltr_count = 0;
2837 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2838 sizeof(long), GFP_KERNEL);
2839
2840 if (!bp->ntp_fltr_bmap)
2841 rc = -ENOMEM;
2842
2843 return rc;
2844#else
2845 return 0;
2846#endif
2847}
2848
2849static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2850{
2851 bnxt_free_vnic_attributes(bp);
2852 bnxt_free_tx_rings(bp);
2853 bnxt_free_rx_rings(bp);
2854 bnxt_free_cp_rings(bp);
2855 bnxt_free_ntp_fltrs(bp, irq_re_init);
2856 if (irq_re_init) {
2857 bnxt_free_stats(bp);
2858 bnxt_free_ring_grps(bp);
2859 bnxt_free_vnics(bp);
Michael Chanb6ab4b02016-01-02 23:44:59 -05002860 kfree(bp->tx_ring);
2861 bp->tx_ring = NULL;
2862 kfree(bp->rx_ring);
2863 bp->rx_ring = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04002864 kfree(bp->bnapi);
2865 bp->bnapi = NULL;
2866 } else {
2867 bnxt_clear_ring_indices(bp);
2868 }
2869}
2870
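/* Allocate all ring memory.  On IRQ re-init this also allocates the
 * bnxt_napi array, the rx/tx ring info arrays, stats contexts,
 * ntuple filter state and vnics before the rings themselves.
 */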
2871static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2872{
Michael Chan01657bc2016-01-02 23:45:03 -05002873 int i, j, rc, size, arr_size;
Michael Chanc0c050c2015-10-22 16:01:17 -04002874 void *bnapi;
2875
2876 if (irq_re_init) {
2877 /* Allocate bnapi mem pointer array and mem block for
2878 * all queues
2879 */
2880 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2881 bp->cp_nr_rings);
2882 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2883 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2884 if (!bnapi)
2885 return -ENOMEM;
2886
2887 bp->bnapi = bnapi;
2888 bnapi += arr_size;
2889 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2890 bp->bnapi[i] = bnapi;
2891 bp->bnapi[i]->index = i;
2892 bp->bnapi[i]->bp = bp;
2893 }
2894
Michael Chanb6ab4b02016-01-02 23:44:59 -05002895 bp->rx_ring = kcalloc(bp->rx_nr_rings,
2896 sizeof(struct bnxt_rx_ring_info),
2897 GFP_KERNEL);
2898 if (!bp->rx_ring)
2899 return -ENOMEM;
2900
2901 for (i = 0; i < bp->rx_nr_rings; i++) {
2902 bp->rx_ring[i].bnapi = bp->bnapi[i];
2903 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2904 }
2905
2906 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2907 sizeof(struct bnxt_tx_ring_info),
2908 GFP_KERNEL);
2909 if (!bp->tx_ring)
2910 return -ENOMEM;
2911
Michael Chan01657bc2016-01-02 23:45:03 -05002912 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2913 j = 0;
2914 else
2915 j = bp->rx_nr_rings;
2916
2917 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2918 bp->tx_ring[i].bnapi = bp->bnapi[j];
2919 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
Michael Chanb6ab4b02016-01-02 23:44:59 -05002920 }
2921
Michael Chanc0c050c2015-10-22 16:01:17 -04002922 rc = bnxt_alloc_stats(bp);
2923 if (rc)
2924 goto alloc_mem_err;
2925
2926 rc = bnxt_alloc_ntp_fltrs(bp);
2927 if (rc)
2928 goto alloc_mem_err;
2929
2930 rc = bnxt_alloc_vnics(bp);
2931 if (rc)
2932 goto alloc_mem_err;
2933 }
2934
2935 bnxt_init_ring_struct(bp);
2936
2937 rc = bnxt_alloc_rx_rings(bp);
2938 if (rc)
2939 goto alloc_mem_err;
2940
2941 rc = bnxt_alloc_tx_rings(bp);
2942 if (rc)
2943 goto alloc_mem_err;
2944
2945 rc = bnxt_alloc_cp_rings(bp);
2946 if (rc)
2947 goto alloc_mem_err;
2948
2949 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2950 BNXT_VNIC_UCAST_FLAG;
2951 rc = bnxt_alloc_vnic_attributes(bp);
2952 if (rc)
2953 goto alloc_mem_err;
2954 return 0;
2955
2956alloc_mem_err:
2957 bnxt_free_mem(bp, true);
2958 return rc;
2959}
2960
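/* Ack all completion rings at the current consumer index without
 * re-arming them, so that no further completion interrupts are
 * generated.
 */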
Michael Chan9d8bc092016-12-29 12:13:33 -05002961static void bnxt_disable_int(struct bnxt *bp)
2962{
2963 int i;
2964
2965 if (!bp->bnapi)
2966 return;
2967
2968 for (i = 0; i < bp->cp_nr_rings; i++) {
2969 struct bnxt_napi *bnapi = bp->bnapi[i];
2970 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2971
2972 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2973 }
2974}
2975
2976static void bnxt_disable_int_sync(struct bnxt *bp)
2977{
2978 int i;
2979
2980 atomic_inc(&bp->intr_sem);
2981
2982 bnxt_disable_int(bp);
2983 for (i = 0; i < bp->cp_nr_rings; i++)
2984 synchronize_irq(bp->irq_tbl[i].vector);
2985}
2986
2987static void bnxt_enable_int(struct bnxt *bp)
2988{
2989 int i;
2990
2991 atomic_set(&bp->intr_sem, 0);
2992 for (i = 0; i < bp->cp_nr_rings; i++) {
2993 struct bnxt_napi *bnapi = bp->bnapi[i];
2994 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2995
2996 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
2997 }
2998}
2999
Michael Chanc0c050c2015-10-22 16:01:17 -04003000void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3001 u16 cmpl_ring, u16 target_id)
3002{
Michael Chana8643e12016-02-26 04:00:05 -05003003 struct input *req = request;
Michael Chanc0c050c2015-10-22 16:01:17 -04003004
Michael Chana8643e12016-02-26 04:00:05 -05003005 req->req_type = cpu_to_le16(req_type);
3006 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3007 req->target_id = cpu_to_le16(target_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003008 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3009}
3010
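/* Send an HWRM message: copy the request into BAR0 and ring the
 * doorbell, then either wait for the completion interrupt or poll
 * until the response length and valid bit are set.  Must be called
 * with hwrm_cmd_lock held.
 */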
Michael Chanfbfbc482016-02-26 04:00:07 -05003011static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3012 int timeout, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003013{
Michael Chana11fa2b2016-05-15 03:04:47 -04003014 int i, intr_process, rc, tmo_count;
Michael Chana8643e12016-02-26 04:00:05 -05003015 struct input *req = msg;
Michael Chanc0c050c2015-10-22 16:01:17 -04003016 u32 *data = msg;
3017 __le32 *resp_len, *valid;
3018 u16 cp_ring_id, len = 0;
3019 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3020
Michael Chana8643e12016-02-26 04:00:05 -05003021 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
Michael Chanc0c050c2015-10-22 16:01:17 -04003022 memset(resp, 0, PAGE_SIZE);
Michael Chana8643e12016-02-26 04:00:05 -05003023 cp_ring_id = le16_to_cpu(req->cmpl_ring);
Michael Chanc0c050c2015-10-22 16:01:17 -04003024 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3025
3026 /* Write request msg to hwrm channel */
3027 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3028
Michael Chane6ef2692016-03-28 19:46:05 -04003029 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
Michael Chand79979a2016-01-07 19:56:57 -05003030 writel(0, bp->bar0 + i);
3031
Michael Chanc0c050c2015-10-22 16:01:17 -04003032 /* currently supports only one outstanding message */
3033 if (intr_process)
Michael Chana8643e12016-02-26 04:00:05 -05003034 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003035
3036 /* Ring channel doorbell */
3037 writel(1, bp->bar0 + 0x100);
3038
Michael Chanff4fe812016-02-26 04:00:04 -05003039 if (!timeout)
3040 timeout = DFLT_HWRM_CMD_TIMEOUT;
3041
Michael Chanc0c050c2015-10-22 16:01:17 -04003042 i = 0;
Michael Chana11fa2b2016-05-15 03:04:47 -04003043 tmo_count = timeout * 40;
Michael Chanc0c050c2015-10-22 16:01:17 -04003044 if (intr_process) {
3045 /* Wait until hwrm response cmpl interrupt is processed */
3046 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
Michael Chana11fa2b2016-05-15 03:04:47 -04003047 i++ < tmo_count) {
3048 usleep_range(25, 40);
Michael Chanc0c050c2015-10-22 16:01:17 -04003049 }
3050
3051 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3052 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
Michael Chana8643e12016-02-26 04:00:05 -05003053 le16_to_cpu(req->req_type));
Michael Chanc0c050c2015-10-22 16:01:17 -04003054 return -1;
3055 }
3056 } else {
3057 /* Check if response len is updated */
3058 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
Michael Chana11fa2b2016-05-15 03:04:47 -04003059 for (i = 0; i < tmo_count; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003060 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3061 HWRM_RESP_LEN_SFT;
3062 if (len)
3063 break;
Michael Chana11fa2b2016-05-15 03:04:47 -04003064 usleep_range(25, 40);
Michael Chanc0c050c2015-10-22 16:01:17 -04003065 }
3066
Michael Chana11fa2b2016-05-15 03:04:47 -04003067 if (i >= tmo_count) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003068 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
Michael Chana8643e12016-02-26 04:00:05 -05003069 timeout, le16_to_cpu(req->req_type),
Michael Chan8578d6c2016-05-15 03:04:48 -04003070 le16_to_cpu(req->seq_id), len);
Michael Chanc0c050c2015-10-22 16:01:17 -04003071 return -1;
3072 }
3073
3074 /* Last word of resp contains valid bit */
3075 valid = bp->hwrm_cmd_resp_addr + len - 4;
Michael Chana11fa2b2016-05-15 03:04:47 -04003076 for (i = 0; i < 5; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003077 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3078 break;
Michael Chana11fa2b2016-05-15 03:04:47 -04003079 udelay(1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003080 }
3081
Michael Chana11fa2b2016-05-15 03:04:47 -04003082 if (i >= 5) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003083 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
Michael Chana8643e12016-02-26 04:00:05 -05003084 timeout, le16_to_cpu(req->req_type),
3085 le16_to_cpu(req->seq_id), len, *valid);
Michael Chanc0c050c2015-10-22 16:01:17 -04003086 return -1;
3087 }
3088 }
3089
3090 rc = le16_to_cpu(resp->error_code);
Michael Chanfbfbc482016-02-26 04:00:07 -05003091 if (rc && !silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003092 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3093 le16_to_cpu(resp->req_type),
3094 le16_to_cpu(resp->seq_id), rc);
Michael Chanfbfbc482016-02-26 04:00:07 -05003095 return rc;
3096}
3097
3098int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3099{
3100 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04003101}
3102
3103int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3104{
3105 int rc;
3106
3107 mutex_lock(&bp->hwrm_cmd_lock);
3108 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3109 mutex_unlock(&bp->hwrm_cmd_lock);
3110 return rc;
3111}
3112
Michael Chan90e209212016-02-26 04:00:08 -05003113int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3114 int timeout)
3115{
3116 int rc;
3117
3118 mutex_lock(&bp->hwrm_cmd_lock);
3119 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3120 mutex_unlock(&bp->hwrm_cmd_lock);
3121 return rc;
3122}
3123
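/* Register the async events the driver wants the firmware to
 * forward, merging the driver's default event list with the
 * optional caller-supplied @bmap.
 */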
Michael Chana1653b12016-12-07 00:26:20 -05003124int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3125 int bmap_size)
Michael Chanc0c050c2015-10-22 16:01:17 -04003126{
3127 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25be8622016-04-05 14:09:00 -04003128 DECLARE_BITMAP(async_events_bmap, 256);
3129 u32 *events = (u32 *)async_events_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05003130 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003131
3132 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3133
3134 req.enables =
Michael Chana1653b12016-12-07 00:26:20 -05003135 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
Michael Chanc0c050c2015-10-22 16:01:17 -04003136
Michael Chan25be8622016-04-05 14:09:00 -04003137 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3138 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3139 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3140
Michael Chana1653b12016-12-07 00:26:20 -05003141 if (bmap && bmap_size) {
3142 for (i = 0; i < bmap_size; i++) {
3143 if (test_bit(i, bmap))
3144 __set_bit(i, async_events_bmap);
3145 }
3146 }
3147
Michael Chan25be8622016-04-05 14:09:00 -04003148 for (i = 0; i < 8; i++)
3149 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3150
Michael Chana1653b12016-12-07 00:26:20 -05003151 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3152}
3153
3154static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3155{
3156 struct hwrm_func_drv_rgtr_input req = {0};
3157
3158 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3159
3160 req.enables =
3161 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3162 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3163
Michael Chan11f15ed2016-04-05 14:08:55 -04003164 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
Michael Chanc0c050c2015-10-22 16:01:17 -04003165 req.ver_maj = DRV_VER_MAJ;
3166 req.ver_min = DRV_VER_MIN;
3167 req.ver_upd = DRV_VER_UPD;
3168
3169 if (BNXT_PF(bp)) {
Michael Chande68f5de2015-12-09 19:35:41 -05003170 DECLARE_BITMAP(vf_req_snif_bmap, 256);
Michael Chanc0c050c2015-10-22 16:01:17 -04003171 u32 *data = (u32 *)vf_req_snif_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05003172 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003173
Michael Chande68f5de2015-12-09 19:35:41 -05003174 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
Michael Chanc0c050c2015-10-22 16:01:17 -04003175 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3176 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3177
Michael Chande68f5de2015-12-09 19:35:41 -05003178 for (i = 0; i < 8; i++)
3179 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3180
Michael Chanc0c050c2015-10-22 16:01:17 -04003181 req.enables |=
3182 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3183 }
3184
3185 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3186}
3187
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05003188static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3189{
3190 struct hwrm_func_drv_unrgtr_input req = {0};
3191
3192 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3193 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3194}
3195
Michael Chanc0c050c2015-10-22 16:01:17 -04003196static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3197{
3198 int rc = 0;
3199 struct hwrm_tunnel_dst_port_free_input req = {0};
3200
3201 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3202 req.tunnel_type = tunnel_type;
3203
3204 switch (tunnel_type) {
3205 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3206 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3207 break;
3208 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3209 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3216 if (rc)
3217 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3218 rc);
3219 return rc;
3220}
3221
3222static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3223 u8 tunnel_type)
3224{
3225 int rc = 0;
3226 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3227 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3228
3229 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3230
3231 req.tunnel_type = tunnel_type;
3232 req.tunnel_dst_port_val = port;
3233
3234 mutex_lock(&bp->hwrm_cmd_lock);
3235 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3236 if (rc) {
3237 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3238 rc);
3239 goto err_out;
3240 }
3241
Christophe Jaillet57aac712016-11-22 06:14:40 +01003242 switch (tunnel_type) {
3243 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
Michael Chanc0c050c2015-10-22 16:01:17 -04003244 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01003245 break;
3246 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
Michael Chanc0c050c2015-10-22 16:01:17 -04003247 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01003248 break;
3249 default:
3250 break;
3251 }
3252
Michael Chanc0c050c2015-10-22 16:01:17 -04003253err_out:
3254 mutex_unlock(&bp->hwrm_cmd_lock);
3255 return rc;
3256}
3257
3258static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3259{
3260 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3261 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3262
3263 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
Michael Chanc1935542015-12-27 18:19:28 -05003264 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003265
3266 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3267 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3268 req.mask = cpu_to_le32(vnic->rx_mask);
3269 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3270}
3271
3272#ifdef CONFIG_RFS_ACCEL
3273static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3274 struct bnxt_ntuple_filter *fltr)
3275{
3276 struct hwrm_cfa_ntuple_filter_free_input req = {0};
3277
3278 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3279 req.ntuple_filter_id = fltr->filter_id;
3280 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3281}
3282
3283#define BNXT_NTP_FLTR_FLAGS \
3284 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
3285 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
3286 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
3287 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
3288 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
3289 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
3290 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
3291 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
3292 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
3293 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
3294 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
3295 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
3296 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
Michael Chanc1935542015-12-27 18:19:28 -05003297 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04003298
3299static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3300 struct bnxt_ntuple_filter *fltr)
3301{
3302 int rc = 0;
3303 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3304 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3305 bp->hwrm_cmd_resp_addr;
3306 struct flow_keys *keys = &fltr->fkeys;
3307 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3308
3309 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
Michael Chana54c4d72016-07-25 12:33:35 -04003310 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04003311
3312 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3313
3314 req.ethertype = htons(ETH_P_IP);
3315 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
Michael Chanc1935542015-12-27 18:19:28 -05003316 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
Michael Chanc0c050c2015-10-22 16:01:17 -04003317 req.ip_protocol = keys->basic.ip_proto;
3318
Michael Chandda0e742016-12-29 12:13:40 -05003319 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3320 int i;
3321
3322 req.ethertype = htons(ETH_P_IPV6);
3323 req.ip_addr_type =
3324 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3325 *(struct in6_addr *)&req.src_ipaddr[0] =
3326 keys->addrs.v6addrs.src;
3327 *(struct in6_addr *)&req.dst_ipaddr[0] =
3328 keys->addrs.v6addrs.dst;
3329 for (i = 0; i < 4; i++) {
3330 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3331 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3332 }
3333 } else {
3334 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3335 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3336 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3337 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3338 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003339
3340 req.src_port = keys->ports.src;
3341 req.src_port_mask = cpu_to_be16(0xffff);
3342 req.dst_port = keys->ports.dst;
3343 req.dst_port_mask = cpu_to_be16(0xffff);
3344
Michael Chanc1935542015-12-27 18:19:28 -05003345 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003346 mutex_lock(&bp->hwrm_cmd_lock);
3347 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3348 if (!rc)
3349 fltr->filter_id = resp->ntuple_filter_id;
3350 mutex_unlock(&bp->hwrm_cmd_lock);
3351 return rc;
3352}
3353#endif
3354
3355static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3356 u8 *mac_addr)
3357{
3358 int rc = 0;
3359 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3360 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3361
3362 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003363 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3364 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3365 req.flags |=
3366 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
Michael Chanc1935542015-12-27 18:19:28 -05003367 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003368 req.enables =
3369 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
Michael Chanc1935542015-12-27 18:19:28 -05003370 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
Michael Chanc0c050c2015-10-22 16:01:17 -04003371 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3372 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3373 req.l2_addr_mask[0] = 0xff;
3374 req.l2_addr_mask[1] = 0xff;
3375 req.l2_addr_mask[2] = 0xff;
3376 req.l2_addr_mask[3] = 0xff;
3377 req.l2_addr_mask[4] = 0xff;
3378 req.l2_addr_mask[5] = 0xff;
3379
3380 mutex_lock(&bp->hwrm_cmd_lock);
3381 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3382 if (!rc)
3383 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3384 resp->l2_filter_id;
3385 mutex_unlock(&bp->hwrm_cmd_lock);
3386 return rc;
3387}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

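/* Enable or disable TPA (hardware LRO/GRO aggregation) on a vnic.  A zero
 * tpa_flags sends the request with no TPA flags set, turning it off.
 */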
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of aggregation segments is in log2 units, and
		 * the first packet is not counted in these units.
		 */
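		/* For example, assuming a 4K BNXT_RX_PAGE_SIZE and a typical
		 * MAX_SKB_FRAGS of 17 with a 1500-byte MTU (mss = 1460):
		 * each aggregation buffer holds n = 4096 / 1460 = 2 full
		 * segments, so nsegs = (17 - 1) * 2 = 32 and
		 * max_agg_segs = ilog2(32) = 5.
		 */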
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

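/* Enable jumbo placement and IPv4/IPv6 header-data split for the vnic's
 * rx buffer placement modes.
 */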
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

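/* Free one RSS/COS/LB context of a vnic and invalidate the saved handle. */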
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

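/* Program a vnic's default ring group, RSS/COS rules, MRU and VLAN
 * stripping mode.  vnic 0 is the default vnic.
 */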
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS is supported for now.  TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |=
			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	int rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
			bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

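/* Query VNIC capabilities (HWRM spec 1.6.0 or newer) and record whether
 * the new RSS context mode used by RFS vnics is supported.
 */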
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	int rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
			bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	int rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

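/* Send one HWRM_RING_ALLOC request for a completion, tx, rx or rx
 * aggregation ring and save the returned firmware ring id.
 */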
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}

static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 grp_idx = txr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	if (BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req.num_tx_rings = cpu_to_le16(*tx_rings);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

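/* Fill in one coalescing request.  max_bufs and buf_tmrs each carry the
 * normal value in the low 16 bits and the during-interrupt value in the
 * high 16 bits.
 */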
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
				      u32 buf_tmrs, u16 flags,
				      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = cpu_to_le16(flags);
	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	u16 max_buf, max_buf_irq;
	u16 buf_tmr, buf_tmr_irq;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(u16, max_buf, 1, 63);
	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
	 * if coal_ticks is less than 25 us.
	 */
	if (bp->rx_coal_ticks < 25)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;

	bp->tx_push_thresh = 0;
	if (resp->flags &
	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);

		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
		mutex_unlock(&bp->hwrm_cmd_lock);

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			random_ether_addr(bp->dev->dev_addr);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
		return rc;
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
	if (resp->hwrm_intf_maj < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
			    resp->hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj >= 1)
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);

	bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

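/* Push the host's wall-clock time to the NIC (HWRM spec 1.4.0 or newer).
 * Returns -EOPNOTSUPP when the kernel has no RTC library support.
 */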
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
#if IS_ENABLED(CONFIG_RTC_LIB)
	struct hwrm_fw_set_time_input req = {0};
	struct rtc_time tm;
	struct timeval tv;

	if (bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	do_gettimeofday(&tv);
	rtc_time_to_tm(tv.tv_sec, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d rc: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* undo the vnic tpa settings before freeing the vnic */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}

static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	int rc;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}

static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}

Michael Chanc0c050c2015-10-22 16:01:17 -04004807static int bnxt_set_real_num_queues(struct bnxt *bp)
4808{
4809 int rc;
4810 struct net_device *dev = bp->dev;
4811
4812 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4813 if (rc)
4814 return rc;
4815
4816 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4817 if (rc)
4818 return rc;
4819
4820#ifdef CONFIG_RFS_ACCEL
Michael Chan45019a12015-12-27 18:19:22 -05004821 if (bp->flags & BNXT_FLAG_RFS)
Michael Chanc0c050c2015-10-22 16:01:17 -04004822 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004823#endif
4824
4825 return rc;
4826}
4827
Michael Chan6e6c5a52016-01-02 23:45:02 -05004828static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4829 bool shared)
4830{
4831 int _rx = *rx, _tx = *tx;
4832
4833 if (shared) {
4834 *rx = min_t(int, _rx, max);
4835 *tx = min_t(int, _tx, max);
4836 } else {
4837 if (max < 2)
4838 return -ENOMEM;
4839
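4839		/* Shrink the larger of rx and tx until the sum fits in max */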
4840 while (_rx + _tx > max) {
4841 if (_rx > _tx && _rx > 1)
4842 _rx--;
4843 else if (_tx > 1)
4844 _tx--;
4845 }
4846 *rx = _rx;
4847 *tx = _tx;
4848 }
4849 return 0;
4850}
4851
Michael Chan78095922016-12-07 00:26:16 -05004852static void bnxt_setup_msix(struct bnxt *bp)
4853{
4854 const int len = sizeof(bp->irq_tbl[0].name);
4855 struct net_device *dev = bp->dev;
4856 int tcs, i;
4857
4858 tcs = netdev_get_num_tc(dev);
4859 if (tcs > 1) {
4860 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4861 if (bp->tx_nr_rings_per_tc == 0) {
4862 netdev_reset_tc(dev);
4863 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4864 } else {
4865 int i, off, count;
4866
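4866			/* Partition the tx rings into equal, contiguous
4866			 * blocks, one block per traffic class.
4866			 */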
4867 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4868 for (i = 0; i < tcs; i++) {
4869 count = bp->tx_nr_rings_per_tc;
4870 off = i * count;
4871 netdev_set_tc_queue(dev, i, count, off);
4872 }
4873 }
4874 }
4875
4876 for (i = 0; i < bp->cp_nr_rings; i++) {
4877 char *attr;
4878
4879 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4880 attr = "TxRx";
4881 else if (i < bp->rx_nr_rings)
4882 attr = "rx";
4883 else
4884 attr = "tx";
4885
4886 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
4887 i);
4888 bp->irq_tbl[i].handler = bnxt_msix;
4889 }
4890}
4891
4892static void bnxt_setup_inta(struct bnxt *bp)
4893{
4894 const int len = sizeof(bp->irq_tbl[0].name);
4895
4896 if (netdev_get_num_tc(bp->dev))
4897 netdev_reset_tc(bp->dev);
4898
4899 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
4900 0);
4901 bp->irq_tbl[0].handler = bnxt_inta;
4902}
4903
4904static int bnxt_setup_int_mode(struct bnxt *bp)
4905{
4906 int rc;
4907
4908 if (bp->flags & BNXT_FLAG_USING_MSIX)
4909 bnxt_setup_msix(bp);
4910 else
4911 bnxt_setup_inta(bp);
4912
4913 rc = bnxt_set_real_num_queues(bp);
4914 return rc;
4915}
4916
Michael Chan8079e8f2016-12-29 12:13:37 -05004917static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
4918{
4919#if defined(CONFIG_BNXT_SRIOV)
4920 if (BNXT_VF(bp))
4921 return bp->vf.max_rsscos_ctxs;
4922#endif
4923 return bp->pf.max_rsscos_ctxs;
4924}
4925
4926static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
4927{
4928#if defined(CONFIG_BNXT_SRIOV)
4929 if (BNXT_VF(bp))
4930 return bp->vf.max_vnics;
4931#endif
4932 return bp->pf.max_vnics;
4933}
4934
Michael Chane4060d32016-12-07 00:26:19 -05004935unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
4936{
4937#if defined(CONFIG_BNXT_SRIOV)
4938 if (BNXT_VF(bp))
4939 return bp->vf.max_stat_ctxs;
4940#endif
4941 return bp->pf.max_stat_ctxs;
4942}
4943
Michael Chana588e452016-12-07 00:26:21 -05004944void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
4945{
4946#if defined(CONFIG_BNXT_SRIOV)
4947 if (BNXT_VF(bp))
4948 bp->vf.max_stat_ctxs = max;
4949 else
4950#endif
4951 bp->pf.max_stat_ctxs = max;
4952}
4953
Michael Chane4060d32016-12-07 00:26:19 -05004954unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
4955{
4956#if defined(CONFIG_BNXT_SRIOV)
4957 if (BNXT_VF(bp))
4958 return bp->vf.max_cp_rings;
4959#endif
4960 return bp->pf.max_cp_rings;
4961}
4962
Michael Chana588e452016-12-07 00:26:21 -05004963void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
4964{
4965#if defined(CONFIG_BNXT_SRIOV)
4966 if (BNXT_VF(bp))
4967 bp->vf.max_cp_rings = max;
4968 else
4969#endif
4970 bp->pf.max_cp_rings = max;
4971}
4972
Michael Chan78095922016-12-07 00:26:16 -05004973static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
4974{
4975#if defined(CONFIG_BNXT_SRIOV)
4976 if (BNXT_VF(bp))
4977 return bp->vf.max_irqs;
4978#endif
4979 return bp->pf.max_irqs;
4980}
4981
Michael Chan33c26572016-12-07 00:26:15 -05004982void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
4983{
4984#if defined(CONFIG_BNXT_SRIOV)
4985 if (BNXT_VF(bp))
4986 bp->vf.max_irqs = max_irqs;
4987 else
4988#endif
4989 bp->pf.max_irqs = max_irqs;
4990}
4991
Michael Chan78095922016-12-07 00:26:16 -05004992static int bnxt_init_msix(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004993{
Michael Chan01657bc2016-01-02 23:45:03 -05004994 int i, total_vecs, rc = 0, min = 1;
Michael Chan78095922016-12-07 00:26:16 -05004995 struct msix_entry *msix_ent;
Michael Chanc0c050c2015-10-22 16:01:17 -04004996
Michael Chan78095922016-12-07 00:26:16 -05004997 total_vecs = bnxt_get_max_func_irqs(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04004998 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4999 if (!msix_ent)
5000 return -ENOMEM;
5001
5002 for (i = 0; i < total_vecs; i++) {
5003 msix_ent[i].entry = i;
5004 msix_ent[i].vector = 0;
5005 }
5006
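5006	/* Separate rx and tx rings need at least two MSI-X vectors */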
Michael Chan01657bc2016-01-02 23:45:03 -05005007 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5008 min = 2;
5009
5010 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
Michael Chanc0c050c2015-10-22 16:01:17 -04005011 if (total_vecs < 0) {
5012 rc = -ENODEV;
5013 goto msix_setup_exit;
5014 }
5015
5016 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5017 if (bp->irq_tbl) {
Michael Chan78095922016-12-07 00:26:16 -05005018 for (i = 0; i < total_vecs; i++)
5019 bp->irq_tbl[i].vector = msix_ent[i].vector;
Michael Chanc0c050c2015-10-22 16:01:17 -04005020
Michael Chan78095922016-12-07 00:26:16 -05005021 bp->total_irqs = total_vecs;
Michael Chanc0c050c2015-10-22 16:01:17 -04005022		/* Trim rings based on the number of vectors allocated */
Michael Chan6e6c5a52016-01-02 23:45:02 -05005023 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
Michael Chan01657bc2016-01-02 23:45:03 -05005024 total_vecs, min == 1);
Michael Chan6e6c5a52016-01-02 23:45:02 -05005025 if (rc)
5026 goto msix_setup_exit;
5027
Michael Chanc0c050c2015-10-22 16:01:17 -04005028 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
Michael Chan78095922016-12-07 00:26:16 -05005029 bp->cp_nr_rings = (min == 1) ?
5030 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5031 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04005032
Michael Chanc0c050c2015-10-22 16:01:17 -04005033 } else {
5034 rc = -ENOMEM;
5035 goto msix_setup_exit;
5036 }
5037 bp->flags |= BNXT_FLAG_USING_MSIX;
5038 kfree(msix_ent);
5039 return 0;
5040
5041msix_setup_exit:
Michael Chan78095922016-12-07 00:26:16 -05005042 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5043 kfree(bp->irq_tbl);
5044 bp->irq_tbl = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04005045 pci_disable_msix(bp->pdev);
5046 kfree(msix_ent);
5047 return rc;
5048}
5049
Michael Chan78095922016-12-07 00:26:16 -05005050static int bnxt_init_inta(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005051{
Michael Chanc0c050c2015-10-22 16:01:17 -04005052 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
Michael Chan78095922016-12-07 00:26:16 -05005053 if (!bp->irq_tbl)
5054 return -ENOMEM;
5055
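5055	/* With a single legacy interrupt, use one shared rx/tx/cmpl ring */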
5056 bp->total_irqs = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04005057 bp->rx_nr_rings = 1;
5058 bp->tx_nr_rings = 1;
5059 bp->cp_nr_rings = 1;
5060 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
Michael Chan01657bc2016-01-02 23:45:03 -05005061 bp->flags |= BNXT_FLAG_SHARED_RINGS;
Michael Chanc0c050c2015-10-22 16:01:17 -04005062 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan78095922016-12-07 00:26:16 -05005063 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04005064}
5065
Michael Chan78095922016-12-07 00:26:16 -05005066static int bnxt_init_int_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005067{
5068 int rc = 0;
5069
5070 if (bp->flags & BNXT_FLAG_MSIX_CAP)
Michael Chan78095922016-12-07 00:26:16 -05005071 rc = bnxt_init_msix(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005072
Michael Chan1fa72e22016-04-25 02:30:49 -04005073 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005074 /* fallback to INTA */
Michael Chan78095922016-12-07 00:26:16 -05005075 rc = bnxt_init_inta(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005076 }
5077 return rc;
5078}
5079
Michael Chan78095922016-12-07 00:26:16 -05005080static void bnxt_clear_int_mode(struct bnxt *bp)
5081{
5082 if (bp->flags & BNXT_FLAG_USING_MSIX)
5083 pci_disable_msix(bp->pdev);
5084
5085 kfree(bp->irq_tbl);
5086 bp->irq_tbl = NULL;
5087 bp->flags &= ~BNXT_FLAG_USING_MSIX;
5088}
5089
Michael Chanc0c050c2015-10-22 16:01:17 -04005090static void bnxt_free_irq(struct bnxt *bp)
5091{
5092 struct bnxt_irq *irq;
5093 int i;
5094
5095#ifdef CONFIG_RFS_ACCEL
5096 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5097 bp->dev->rx_cpu_rmap = NULL;
5098#endif
5099 if (!bp->irq_tbl)
5100 return;
5101
5102 for (i = 0; i < bp->cp_nr_rings; i++) {
5103 irq = &bp->irq_tbl[i];
5104 if (irq->requested)
5105 free_irq(irq->vector, bp->bnapi[i]);
5106 irq->requested = 0;
5107 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005108}
5109
5110static int bnxt_request_irq(struct bnxt *bp)
5111{
Michael Chanb81a90d2016-01-02 23:45:01 -05005112 int i, j, rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04005113 unsigned long flags = 0;
5114#ifdef CONFIG_RFS_ACCEL
5115 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5116#endif
5117
5118 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5119 flags = IRQF_SHARED;
5120
Michael Chanb81a90d2016-01-02 23:45:01 -05005121 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005122 struct bnxt_irq *irq = &bp->irq_tbl[i];
5123#ifdef CONFIG_RFS_ACCEL
Michael Chanb81a90d2016-01-02 23:45:01 -05005124 if (rmap && bp->bnapi[i]->rx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005125 rc = irq_cpu_rmap_add(rmap, irq->vector);
5126 if (rc)
5127 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05005128 j);
5129 j++;
Michael Chanc0c050c2015-10-22 16:01:17 -04005130 }
5131#endif
5132 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5133 bp->bnapi[i]);
5134 if (rc)
5135 break;
5136
5137 irq->requested = 1;
5138 }
5139 return rc;
5140}
5141
5142static void bnxt_del_napi(struct bnxt *bp)
5143{
5144 int i;
5145
5146 if (!bp->bnapi)
5147 return;
5148
5149 for (i = 0; i < bp->cp_nr_rings; i++) {
5150 struct bnxt_napi *bnapi = bp->bnapi[i];
5151
5152 napi_hash_del(&bnapi->napi);
5153 netif_napi_del(&bnapi->napi);
5154 }
Eric Dumazete5f6f562016-11-16 06:31:52 -08005155	/* We called napi_hash_del() before netif_napi_del(), so we need
5156 * to respect an RCU grace period before freeing napi structures.
5157 */
5158 synchronize_net();
Michael Chanc0c050c2015-10-22 16:01:17 -04005159}
5160
5161static void bnxt_init_napi(struct bnxt *bp)
5162{
5163 int i;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005164 unsigned int cp_nr_rings = bp->cp_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04005165 struct bnxt_napi *bnapi;
5166
5167 if (bp->flags & BNXT_FLAG_USING_MSIX) {
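5167		/* On Nitro A0, the last completion ring uses a dedicated
5167		 * poll handler, bnxt_poll_nitroa0(), instead of bnxt_poll().
5167		 */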
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005168 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5169 cp_nr_rings--;
5170 for (i = 0; i < cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005171 bnapi = bp->bnapi[i];
5172 netif_napi_add(bp->dev, &bnapi->napi,
5173 bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04005174 }
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005175 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5176 bnapi = bp->bnapi[cp_nr_rings];
5177 netif_napi_add(bp->dev, &bnapi->napi,
5178 bnxt_poll_nitroa0, 64);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005179 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005180 } else {
5181 bnapi = bp->bnapi[0];
5182 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04005183 }
5184}
5185
5186static void bnxt_disable_napi(struct bnxt *bp)
5187{
5188 int i;
5189
5190 if (!bp->bnapi)
5191 return;
5192
Michael Chanb356a2e2016-12-29 12:13:31 -05005193 for (i = 0; i < bp->cp_nr_rings; i++)
Michael Chanc0c050c2015-10-22 16:01:17 -04005194 napi_disable(&bp->bnapi[i]->napi);
Michael Chanc0c050c2015-10-22 16:01:17 -04005195}
5196
5197static void bnxt_enable_napi(struct bnxt *bp)
5198{
5199 int i;
5200
5201 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chanfa7e2812016-05-10 19:18:00 -04005202 bp->bnapi[i]->in_reset = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04005203 napi_enable(&bp->bnapi[i]->napi);
5204 }
5205}
5206
Michael Chan7df4ae92016-12-02 21:17:17 -05005207void bnxt_tx_disable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005208{
5209 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005210 struct bnxt_tx_ring_info *txr;
5211 struct netdev_queue *txq;
5212
Michael Chanb6ab4b02016-01-02 23:44:59 -05005213 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005214 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005215 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005216 txq = netdev_get_tx_queue(bp->dev, i);
Michael Chanc0c050c2015-10-22 16:01:17 -04005217 txr->dev_state = BNXT_DEV_STATE_CLOSING;
Michael Chanc0c050c2015-10-22 16:01:17 -04005218 }
5219 }
5220 /* Stop all TX queues */
5221 netif_tx_disable(bp->dev);
5222 netif_carrier_off(bp->dev);
5223}
5224
Michael Chan7df4ae92016-12-02 21:17:17 -05005225void bnxt_tx_enable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005226{
5227 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005228 struct bnxt_tx_ring_info *txr;
5229 struct netdev_queue *txq;
5230
5231 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005232 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005233 txq = netdev_get_tx_queue(bp->dev, i);
5234 txr->dev_state = 0;
5235 }
5236 netif_tx_wake_all_queues(bp->dev);
5237 if (bp->link_info.link_up)
5238 netif_carrier_on(bp->dev);
5239}
5240
5241static void bnxt_report_link(struct bnxt *bp)
5242{
5243 if (bp->link_info.link_up) {
5244 const char *duplex;
5245 const char *flow_ctrl;
5246 u16 speed;
5247
5248 netif_carrier_on(bp->dev);
5249 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5250 duplex = "full";
5251 else
5252 duplex = "half";
5253 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5254 flow_ctrl = "ON - receive & transmit";
5255 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5256 flow_ctrl = "ON - transmit";
5257 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5258 flow_ctrl = "ON - receive";
5259 else
5260 flow_ctrl = "none";
5261 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5262 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
5263 speed, duplex, flow_ctrl);
Michael Chan170ce012016-04-05 14:08:57 -04005264 if (bp->flags & BNXT_FLAG_EEE_CAP)
5265 netdev_info(bp->dev, "EEE is %s\n",
5266 bp->eee.eee_active ? "active" :
5267 "not active");
Michael Chanc0c050c2015-10-22 16:01:17 -04005268 } else {
5269 netif_carrier_off(bp->dev);
5270 netdev_err(bp->dev, "NIC Link is Down\n");
5271 }
5272}
5273
Michael Chan170ce012016-04-05 14:08:57 -04005274static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5275{
5276 int rc = 0;
5277 struct hwrm_port_phy_qcaps_input req = {0};
5278 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan93ed8112016-06-13 02:25:37 -04005279 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chan170ce012016-04-05 14:08:57 -04005280
5281 if (bp->hwrm_spec_code < 0x10201)
5282 return 0;
5283
5284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5285
5286 mutex_lock(&bp->hwrm_cmd_lock);
5287 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5288 if (rc)
5289 goto hwrm_phy_qcaps_exit;
5290
5291 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5292 struct ethtool_eee *eee = &bp->eee;
5293 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5294
5295 bp->flags |= BNXT_FLAG_EEE_CAP;
5296 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5297 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5298 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5299 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5300 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5301 }
Michael Chan93ed8112016-06-13 02:25:37 -04005302 link_info->support_auto_speeds =
5303 le16_to_cpu(resp->supported_speeds_auto_mode);
Michael Chan170ce012016-04-05 14:08:57 -04005304
5305hwrm_phy_qcaps_exit:
5306 mutex_unlock(&bp->hwrm_cmd_lock);
5307 return rc;
5308}
5309
Michael Chanc0c050c2015-10-22 16:01:17 -04005310static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5311{
5312 int rc = 0;
5313 struct bnxt_link_info *link_info = &bp->link_info;
5314 struct hwrm_port_phy_qcfg_input req = {0};
5315 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5316 u8 link_up = link_info->link_up;
Michael Chan286ef9d2016-11-16 21:13:08 -05005317 u16 diff;
Michael Chanc0c050c2015-10-22 16:01:17 -04005318
5319 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5320
5321 mutex_lock(&bp->hwrm_cmd_lock);
5322 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5323 if (rc) {
5324 mutex_unlock(&bp->hwrm_cmd_lock);
5325 return rc;
5326 }
5327
5328 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5329 link_info->phy_link_status = resp->link;
5330 link_info->duplex = resp->duplex;
5331 link_info->pause = resp->pause;
5332 link_info->auto_mode = resp->auto_mode;
5333 link_info->auto_pause_setting = resp->auto_pause;
Michael Chan32773602016-03-07 15:38:42 -05005334 link_info->lp_pause = resp->link_partner_adv_pause;
Michael Chanc0c050c2015-10-22 16:01:17 -04005335 link_info->force_pause_setting = resp->force_pause;
Michael Chanc1935542015-12-27 18:19:28 -05005336 link_info->duplex_setting = resp->duplex;
Michael Chanc0c050c2015-10-22 16:01:17 -04005337 if (link_info->phy_link_status == BNXT_LINK_LINK)
5338 link_info->link_speed = le16_to_cpu(resp->link_speed);
5339 else
5340 link_info->link_speed = 0;
5341 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
Michael Chanc0c050c2015-10-22 16:01:17 -04005342 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5343 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
Michael Chan32773602016-03-07 15:38:42 -05005344 link_info->lp_auto_link_speeds =
5345 le16_to_cpu(resp->link_partner_adv_speeds);
Michael Chanc0c050c2015-10-22 16:01:17 -04005346 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5347 link_info->phy_ver[0] = resp->phy_maj;
5348 link_info->phy_ver[1] = resp->phy_min;
5349 link_info->phy_ver[2] = resp->phy_bld;
5350 link_info->media_type = resp->media_type;
Michael Chan03efbec2016-04-11 04:11:11 -04005351 link_info->phy_type = resp->phy_type;
Michael Chan11f15ed2016-04-05 14:08:55 -04005352 link_info->transceiver = resp->xcvr_pkg_type;
Michael Chan170ce012016-04-05 14:08:57 -04005353 link_info->phy_addr = resp->eee_config_phy_addr &
5354 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04005355 link_info->module_status = resp->module_status;
Michael Chanc0c050c2015-10-22 16:01:17 -04005356
Michael Chan170ce012016-04-05 14:08:57 -04005357 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5358 struct ethtool_eee *eee = &bp->eee;
5359 u16 fw_speeds;
5360
5361 eee->eee_active = 0;
5362 if (resp->eee_config_phy_addr &
5363 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5364 eee->eee_active = 1;
5365 fw_speeds = le16_to_cpu(
5366 resp->link_partner_adv_eee_link_speed_mask);
5367 eee->lp_advertised =
5368 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5369 }
5370
5371 /* Pull initial EEE config */
5372 if (!chng_link_state) {
5373 if (resp->eee_config_phy_addr &
5374 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5375 eee->eee_enabled = 1;
5376
5377 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5378 eee->advertised =
5379 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5380
5381 if (resp->eee_config_phy_addr &
5382 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5383 __le32 tmr;
5384
5385 eee->tx_lpi_enabled = 1;
5386 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5387 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5388 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5389 }
5390 }
5391 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005392 /* TODO: need to add more logic to report VF link */
5393 if (chng_link_state) {
5394 if (link_info->phy_link_status == BNXT_LINK_LINK)
5395 link_info->link_up = 1;
5396 else
5397 link_info->link_up = 0;
5398 if (link_up != link_info->link_up)
5399 bnxt_report_link(bp);
5400 } else {
5401		/* always link down if not required to update link state */
5402 link_info->link_up = 0;
5403 }
5404 mutex_unlock(&bp->hwrm_cmd_lock);
Michael Chan286ef9d2016-11-16 21:13:08 -05005405
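5405	/* (support | diff) != support iff the advertising mask contains
5405	 * speed bits outside of support_auto_speeds.
5405	 */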
5406 diff = link_info->support_auto_speeds ^ link_info->advertising;
5407 if ((link_info->support_auto_speeds | diff) !=
5408 link_info->support_auto_speeds) {
5409 /* An advertised speed is no longer supported, so we need to
5410 * update the advertisement settings. See bnxt_reset() for
5411 * comments about the rtnl_lock() sequence below.
5412 */
5413 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5414 rtnl_lock();
5415 link_info->advertising = link_info->support_auto_speeds;
5416 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
5417 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5418 bnxt_hwrm_set_link_setting(bp, true, false);
5419 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5420 rtnl_unlock();
5421 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005422 return 0;
5423}
5424
Michael Chan10289be2016-05-15 03:04:49 -04005425static void bnxt_get_port_module_status(struct bnxt *bp)
5426{
5427 struct bnxt_link_info *link_info = &bp->link_info;
5428 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5429 u8 module_status;
5430
5431 if (bnxt_update_link(bp, true))
5432 return;
5433
5434 module_status = link_info->module_status;
5435 switch (module_status) {
5436 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5437 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5438 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5439 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5440 bp->pf.port_id);
5441 if (bp->hwrm_spec_code >= 0x10201) {
5442 netdev_warn(bp->dev, "Module part number %s\n",
5443 resp->phy_vendor_partnumber);
5444 }
5445 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5446 netdev_warn(bp->dev, "TX is disabled\n");
5447 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5448 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5449 }
5450}
5451
Michael Chanc0c050c2015-10-22 16:01:17 -04005452static void
5453bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5454{
5455 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04005456 if (bp->hwrm_spec_code >= 0x10201)
5457 req->auto_pause =
5458 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04005459 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5460 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5461 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04005462 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04005463 req->enables |=
5464 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5465 } else {
5466 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5467 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5468 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5469 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5470 req->enables |=
5471 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04005472 if (bp->hwrm_spec_code >= 0x10201) {
5473 req->auto_pause = req->force_pause;
5474 req->enables |= cpu_to_le32(
5475 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5476 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005477 }
5478}
5479
5480static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5481 struct hwrm_port_phy_cfg_input *req)
5482{
5483 u8 autoneg = bp->link_info.autoneg;
5484 u16 fw_link_speed = bp->link_info.req_link_speed;
Michael Chan68515a12016-12-29 12:13:34 -05005485 u16 advertising = bp->link_info.advertising;
Michael Chanc0c050c2015-10-22 16:01:17 -04005486
5487 if (autoneg & BNXT_AUTONEG_SPEED) {
5488 req->auto_mode |=
Michael Chan11f15ed2016-04-05 14:08:55 -04005489 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04005490
5491 req->enables |= cpu_to_le32(
5492 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5493 req->auto_link_speed_mask = cpu_to_le16(advertising);
5494
5495 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5496 req->flags |=
5497 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5498 } else {
5499 req->force_link_speed = cpu_to_le16(fw_link_speed);
5500 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5501 }
5502
Michael Chanc0c050c2015-10-22 16:01:17 -04005503 /* tell chimp that the setting takes effect immediately */
5504 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5505}
5506
5507int bnxt_hwrm_set_pause(struct bnxt *bp)
5508{
5509 struct hwrm_port_phy_cfg_input req = {0};
5510 int rc;
5511
5512 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5513 bnxt_hwrm_set_pause_common(bp, &req);
5514
5515 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5516 bp->link_info.force_link_chng)
5517 bnxt_hwrm_set_link_common(bp, &req);
5518
5519 mutex_lock(&bp->hwrm_cmd_lock);
5520 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5521 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5522		/* Since changing the pause setting doesn't trigger any link
5523		 * change event, the driver needs to update the current pause
5524		 * result upon successful return of the phy_cfg command
5525 */
5526 bp->link_info.pause =
5527 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5528 bp->link_info.auto_pause_setting = 0;
5529 if (!bp->link_info.force_link_chng)
5530 bnxt_report_link(bp);
5531 }
5532 bp->link_info.force_link_chng = false;
5533 mutex_unlock(&bp->hwrm_cmd_lock);
5534 return rc;
5535}
5536
Michael Chan939f7f02016-04-05 14:08:58 -04005537static void bnxt_hwrm_set_eee(struct bnxt *bp,
5538 struct hwrm_port_phy_cfg_input *req)
5539{
5540 struct ethtool_eee *eee = &bp->eee;
5541
5542 if (eee->eee_enabled) {
5543 u16 eee_speeds;
5544 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5545
5546 if (eee->tx_lpi_enabled)
5547 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5548 else
5549 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5550
5551 req->flags |= cpu_to_le32(flags);
5552 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5553 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5554 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5555 } else {
5556 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5557 }
5558}
5559
5560int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04005561{
5562 struct hwrm_port_phy_cfg_input req = {0};
5563
5564 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5565 if (set_pause)
5566 bnxt_hwrm_set_pause_common(bp, &req);
5567
5568 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04005569
5570 if (set_eee)
5571 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04005572 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5573}
5574
Michael Chan33f7d552016-04-11 04:11:12 -04005575static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5576{
5577 struct hwrm_port_phy_cfg_input req = {0};
5578
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005579 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04005580 return 0;
5581
5582 if (pci_num_vf(bp->pdev))
5583 return 0;
5584
5585 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05005586 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04005587 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5588}
5589
Michael Chan939f7f02016-04-05 14:08:58 -04005590static bool bnxt_eee_config_ok(struct bnxt *bp)
5591{
5592 struct ethtool_eee *eee = &bp->eee;
5593 struct bnxt_link_info *link_info = &bp->link_info;
5594
5595 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
5596 return true;
5597
5598 if (eee->eee_enabled) {
5599 u32 advertising =
5600 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
5601
5602 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5603 eee->eee_enabled = 0;
5604 return false;
5605 }
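5605		/* The EEE advertisement must be a subset of the autoneg
5605		 * advertisement.
5605		 */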
5606 if (eee->advertised & ~advertising) {
5607 eee->advertised = advertising & eee->supported;
5608 return false;
5609 }
5610 }
5611 return true;
5612}
5613
Michael Chanc0c050c2015-10-22 16:01:17 -04005614static int bnxt_update_phy_setting(struct bnxt *bp)
5615{
5616 int rc;
5617 bool update_link = false;
5618 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04005619 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04005620 struct bnxt_link_info *link_info = &bp->link_info;
5621
5622 rc = bnxt_update_link(bp, true);
5623 if (rc) {
5624 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
5625 rc);
5626 return rc;
5627 }
5628 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04005629 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
5630 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04005631 update_pause = true;
5632 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
5633 link_info->force_pause_setting != link_info->req_flow_ctrl)
5634 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005635 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5636 if (BNXT_AUTO_MODE(link_info->auto_mode))
5637 update_link = true;
5638 if (link_info->req_link_speed != link_info->force_link_speed)
5639 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05005640 if (link_info->req_duplex != link_info->duplex_setting)
5641 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005642 } else {
5643 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
5644 update_link = true;
5645 if (link_info->advertising != link_info->auto_link_speeds)
5646 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005647 }
5648
Michael Chan16d663a2016-11-16 21:13:07 -05005649	/* The last close may have shut down the link, so we need to call
5650 * PHY_CFG to bring it back up.
5651 */
5652 if (!netif_carrier_ok(bp->dev))
5653 update_link = true;
5654
Michael Chan939f7f02016-04-05 14:08:58 -04005655 if (!bnxt_eee_config_ok(bp))
5656 update_eee = true;
5657
Michael Chanc0c050c2015-10-22 16:01:17 -04005658 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04005659 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04005660 else if (update_pause)
5661 rc = bnxt_hwrm_set_pause(bp);
5662 if (rc) {
5663 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
5664 rc);
5665 return rc;
5666 }
5667
5668 return rc;
5669}
5670
Jeffrey Huang11809492015-11-05 16:25:49 -05005671/* Common routine to pre-map certain register blocks to different GRC windows.
5672 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
5673 * in the PF and 3 windows in the VF can be customized to map different
5674 * register blocks.
5675 */
5676static void bnxt_preset_reg_win(struct bnxt *bp)
5677{
5678 if (BNXT_PF(bp)) {
5679 /* CAG registers map to GRC window #4 */
5680 writel(BNXT_CAG_REG_BASE,
5681 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
5682 }
5683}
5684
Michael Chanc0c050c2015-10-22 16:01:17 -04005685static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5686{
5687 int rc = 0;
5688
Jeffrey Huang11809492015-11-05 16:25:49 -05005689 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005690 netif_carrier_off(bp->dev);
5691 if (irq_re_init) {
5692 rc = bnxt_setup_int_mode(bp);
5693 if (rc) {
5694 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
5695 rc);
5696 return rc;
5697 }
5698 }
5699 if ((bp->flags & BNXT_FLAG_RFS) &&
5700 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
5701 /* disable RFS if falling back to INTA */
5702 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
5703 bp->flags &= ~BNXT_FLAG_RFS;
5704 }
5705
5706 rc = bnxt_alloc_mem(bp, irq_re_init);
5707 if (rc) {
5708 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
5709 goto open_err_free_mem;
5710 }
5711
5712 if (irq_re_init) {
5713 bnxt_init_napi(bp);
5714 rc = bnxt_request_irq(bp);
5715 if (rc) {
5716 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
5717 goto open_err;
5718 }
5719 }
5720
5721 bnxt_enable_napi(bp);
5722
5723 rc = bnxt_init_nic(bp, irq_re_init);
5724 if (rc) {
5725 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
5726 goto open_err;
5727 }
5728
5729 if (link_re_init) {
5730 rc = bnxt_update_phy_setting(bp);
5731 if (rc)
Michael Chanba41d462016-02-19 19:43:21 -05005732 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04005733 }
5734
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07005735 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07005736 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04005737
Michael Chancaefe522015-12-09 19:35:42 -05005738 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005739 bnxt_enable_int(bp);
5740 /* Enable TX queues */
5741 bnxt_tx_enable(bp);
5742 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04005743	/* Poll link status and check SFP+ module status */
5744 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005745
5746 return 0;
5747
5748open_err:
5749 bnxt_disable_napi(bp);
5750 bnxt_del_napi(bp);
5751
5752open_err_free_mem:
5753 bnxt_free_skbs(bp);
5754 bnxt_free_irq(bp);
5755 bnxt_free_mem(bp, true);
5756 return rc;
5757}
5758
5759/* rtnl_lock held */
5760int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5761{
5762 int rc = 0;
5763
5764 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
5765 if (rc) {
5766 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
5767 dev_close(bp->dev);
5768 }
5769 return rc;
5770}
5771
5772static int bnxt_open(struct net_device *dev)
5773{
5774 struct bnxt *bp = netdev_priv(dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04005775
Michael Chanc0c050c2015-10-22 16:01:17 -04005776 return __bnxt_open_nic(bp, true, true);
5777}
5778
Michael Chanc0c050c2015-10-22 16:01:17 -04005779int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5780{
5781 int rc = 0;
5782
5783#ifdef CONFIG_BNXT_SRIOV
5784 if (bp->sriov_cfg) {
5785 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
5786 !bp->sriov_cfg,
5787 BNXT_SRIOV_CFG_WAIT_TMO);
5788 if (rc)
5789 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
5790 }
5791#endif
5792 /* Change device state to avoid TX queue wake up's */
5793 bnxt_tx_disable(bp);
5794
Michael Chancaefe522015-12-09 19:35:42 -05005795 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05005796 smp_mb__after_atomic();
5797 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
5798 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04005799
Michael Chan9d8bc092016-12-29 12:13:33 -05005800	/* Flush rings and disable interrupts */
Michael Chanc0c050c2015-10-22 16:01:17 -04005801 bnxt_shutdown_nic(bp, irq_re_init);
5802
5803 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
5804
5805 bnxt_disable_napi(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005806 del_timer_sync(&bp->timer);
5807 bnxt_free_skbs(bp);
5808
5809 if (irq_re_init) {
5810 bnxt_free_irq(bp);
5811 bnxt_del_napi(bp);
5812 }
5813 bnxt_free_mem(bp, irq_re_init);
5814 return rc;
5815}
5816
5817static int bnxt_close(struct net_device *dev)
5818{
5819 struct bnxt *bp = netdev_priv(dev);
5820
5821 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04005822 bnxt_hwrm_shutdown_link(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005823 return 0;
5824}
5825
5826/* rtnl_lock held */
5827static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5828{
5829 switch (cmd) {
5830 case SIOCGMIIPHY:
5831 /* fallthru */
5832 case SIOCGMIIREG: {
5833 if (!netif_running(dev))
5834 return -EAGAIN;
5835
5836 return 0;
5837 }
5838
5839 case SIOCSMIIREG:
5840 if (!netif_running(dev))
5841 return -EAGAIN;
5842
5843 return 0;
5844
5845 default:
5846 /* do nothing */
5847 break;
5848 }
5849 return -EOPNOTSUPP;
5850}
5851
5852static struct rtnl_link_stats64 *
5853bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5854{
5855 u32 i;
5856 struct bnxt *bp = netdev_priv(dev);
5857
5858 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5859
5860 if (!bp->bnapi)
5861 return stats;
5862
5863 /* TODO check if we need to synchronize with bnxt_close path */
5864 for (i = 0; i < bp->cp_nr_rings; i++) {
5865 struct bnxt_napi *bnapi = bp->bnapi[i];
5866 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5867 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5868
5869 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5870 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5871 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5872
5873 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5874 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5875 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5876
5877 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5878 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5879 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5880
5881 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5882 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5883 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5884
5885 stats->rx_missed_errors +=
5886 le64_to_cpu(hw_stats->rx_discard_pkts);
5887
5888 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5889
Michael Chanc0c050c2015-10-22 16:01:17 -04005890 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5891 }
5892
Michael Chan9947f832016-03-07 15:38:46 -05005893 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5894 struct rx_port_stats *rx = bp->hw_rx_port_stats;
5895 struct tx_port_stats *tx = bp->hw_tx_port_stats;
5896
5897 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5898 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5899 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5900 le64_to_cpu(rx->rx_ovrsz_frames) +
5901 le64_to_cpu(rx->rx_runt_frames);
5902 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5903 le64_to_cpu(rx->rx_jbr_frames);
5904 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5905 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5906 stats->tx_errors = le64_to_cpu(tx->tx_err);
5907 }
5908
Michael Chanc0c050c2015-10-22 16:01:17 -04005909 return stats;
5910}
5911
5912static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5913{
5914 struct net_device *dev = bp->dev;
5915 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5916 struct netdev_hw_addr *ha;
5917 u8 *haddr;
5918 int mc_count = 0;
5919 bool update = false;
5920 int off = 0;
5921
5922 netdev_for_each_mc_addr(ha, dev) {
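5922		/* HW list full; fall back to receiving all multicast */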
5923 if (mc_count >= BNXT_MAX_MC_ADDRS) {
5924 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5925 vnic->mc_list_count = 0;
5926 return false;
5927 }
5928 haddr = ha->addr;
5929 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5930 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5931 update = true;
5932 }
5933 off += ETH_ALEN;
5934 mc_count++;
5935 }
5936 if (mc_count)
5937 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5938
5939 if (mc_count != vnic->mc_list_count) {
5940 vnic->mc_list_count = mc_count;
5941 update = true;
5942 }
5943 return update;
5944}
5945
5946static bool bnxt_uc_list_updated(struct bnxt *bp)
5947{
5948 struct net_device *dev = bp->dev;
5949 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5950 struct netdev_hw_addr *ha;
5951 int off = 0;
5952
5953 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5954 return true;
5955
5956 netdev_for_each_uc_addr(ha, dev) {
5957 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5958 return true;
5959
5960 off += ETH_ALEN;
5961 }
5962 return false;
5963}
5964
5965static void bnxt_set_rx_mode(struct net_device *dev)
5966{
5967 struct bnxt *bp = netdev_priv(dev);
5968 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5969 u32 mask = vnic->rx_mask;
5970 bool mc_update = false;
5971 bool uc_update;
5972
5973 if (!netif_running(dev))
5974 return;
5975
5976 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5977 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5978 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5979
Michael Chan17c71ac2016-07-01 18:46:27 -04005980 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04005981 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5982
5983 uc_update = bnxt_uc_list_updated(bp);
5984
5985 if (dev->flags & IFF_ALLMULTI) {
5986 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5987 vnic->mc_list_count = 0;
5988 } else {
5989 mc_update = bnxt_mc_list_updated(bp, &mask);
5990 }
5991
5992 if (mask != vnic->rx_mask || uc_update || mc_update) {
5993 vnic->rx_mask = mask;
5994
5995 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5996 schedule_work(&bp->sp_task);
5997 }
5998}
5999
Michael Chanb664f002015-12-02 01:54:08 -05006000static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006001{
6002 struct net_device *dev = bp->dev;
6003 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6004 struct netdev_hw_addr *ha;
6005 int i, off = 0, rc;
6006 bool uc_update;
6007
6008 netif_addr_lock_bh(dev);
6009 uc_update = bnxt_uc_list_updated(bp);
6010 netif_addr_unlock_bh(dev);
6011
6012 if (!uc_update)
6013 goto skip_uc;
6014
6015 mutex_lock(&bp->hwrm_cmd_lock);
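6015	/* Slot 0 holds the default MAC filter; free only the extra
6015	 * unicast filters before rebuilding the list.
6015	 */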
6016 for (i = 1; i < vnic->uc_filter_count; i++) {
6017 struct hwrm_cfa_l2_filter_free_input req = {0};
6018
6019 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6020 -1);
6021
6022 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6023
6024 rc = _hwrm_send_message(bp, &req, sizeof(req),
6025 HWRM_CMD_TIMEOUT);
6026 }
6027 mutex_unlock(&bp->hwrm_cmd_lock);
6028
6029 vnic->uc_filter_count = 1;
6030
6031 netif_addr_lock_bh(dev);
6032 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6033 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6034 } else {
6035 netdev_for_each_uc_addr(ha, dev) {
6036 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6037 off += ETH_ALEN;
6038 vnic->uc_filter_count++;
6039 }
6040 }
6041 netif_addr_unlock_bh(dev);
6042
6043 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6044 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6045 if (rc) {
6046 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6047 rc);
6048 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05006049 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006050 }
6051 }
6052
6053skip_uc:
6054 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6055 if (rc)
6056 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6057 rc);
Michael Chanb664f002015-12-02 01:54:08 -05006058
6059 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006060}
6061
Michael Chan8079e8f2016-12-29 12:13:37 -05006062/* If the chip and firmware support RFS */
6063static bool bnxt_rfs_supported(struct bnxt *bp)
6064{
6065 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6066 return true;
Michael Chanae10ae72016-12-29 12:13:38 -05006067 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6068 return true;
Michael Chan8079e8f2016-12-29 12:13:37 -05006069 return false;
6070}
6071
6072/* If runtime conditions support RFS */
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006073static bool bnxt_rfs_capable(struct bnxt *bp)
6074{
6075#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05006076 int vnics, max_vnics, max_rss_ctxs;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006077
6078 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
6079 return false;
6080
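6080	/* One default vnic plus one vnic per rx ring for RFS steering */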
6081 vnics = 1 + bp->rx_nr_rings;
Michael Chan8079e8f2016-12-29 12:13:37 -05006082 max_vnics = bnxt_get_max_func_vnics(bp);
6083 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
Michael Chanae10ae72016-12-29 12:13:38 -05006084
6085 /* RSS contexts not a limiting factor */
6086 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6087 max_rss_ctxs = max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05006088 if (vnics > max_vnics || vnics > max_rss_ctxs) {
Vasundhara Volama2304902016-07-25 12:33:36 -04006089 netdev_warn(bp->dev,
6090 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
Michael Chan8079e8f2016-12-29 12:13:37 -05006091 min(max_rss_ctxs - 1, max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006092 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04006093 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006094
6095 return true;
6096#else
6097 return false;
6098#endif
6099}
6100
Michael Chanc0c050c2015-10-22 16:01:17 -04006101static netdev_features_t bnxt_fix_features(struct net_device *dev,
6102 netdev_features_t features)
6103{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006104 struct bnxt *bp = netdev_priv(dev);
6105
Vasundhara Volama2304902016-07-25 12:33:36 -04006106 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006107 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04006108
6109 /* Both CTAG and STAG VLAN accelaration on the RX side have to be
6110 * turned on or off together.
6111 */
6112 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6113 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6114 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6115 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6116 NETIF_F_HW_VLAN_STAG_RX);
6117 else
6118 features |= NETIF_F_HW_VLAN_CTAG_RX |
6119 NETIF_F_HW_VLAN_STAG_RX;
6120 }
Michael Chancf6645f2016-06-13 02:25:28 -04006121#ifdef CONFIG_BNXT_SRIOV
6122 if (BNXT_VF(bp)) {
6123 if (bp->vf.vlan) {
6124 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6125 NETIF_F_HW_VLAN_STAG_RX);
6126 }
6127 }
6128#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04006129 return features;
6130}
6131
6132static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6133{
6134 struct bnxt *bp = netdev_priv(dev);
6135 u32 flags = bp->flags;
6136 u32 changes;
6137 int rc = 0;
6138 bool re_init = false;
6139 bool update_tpa = false;
6140
6141 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006142 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04006143 flags |= BNXT_FLAG_GRO;
6144 if (features & NETIF_F_LRO)
6145 flags |= BNXT_FLAG_LRO;
6146
6147 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6148 flags |= BNXT_FLAG_STRIP_VLAN;
6149
6150 if (features & NETIF_F_NTUPLE)
6151 flags |= BNXT_FLAG_RFS;
6152
6153 changes = flags ^ bp->flags;
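6153	/* Enabling or disabling TPA altogether requires a ring reinit;
6153	 * switching between GRO and LRO does not.
6153	 */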
6154 if (changes & BNXT_FLAG_TPA) {
6155 update_tpa = true;
6156 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6157 (flags & BNXT_FLAG_TPA) == 0)
6158 re_init = true;
6159 }
6160
6161 if (changes & ~BNXT_FLAG_TPA)
6162 re_init = true;
6163
6164 if (flags != bp->flags) {
6165 u32 old_flags = bp->flags;
6166
6167 bp->flags = flags;
6168
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006169 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006170 if (update_tpa)
6171 bnxt_set_ring_params(bp);
6172 return rc;
6173 }
6174
6175 if (re_init) {
6176 bnxt_close_nic(bp, false, false);
6177 if (update_tpa)
6178 bnxt_set_ring_params(bp);
6179
6180 return bnxt_open_nic(bp, false, false);
6181 }
6182 if (update_tpa) {
6183 rc = bnxt_set_tpa(bp,
6184 (flags & BNXT_FLAG_TPA) ?
6185 true : false);
6186 if (rc)
6187 bp->flags = old_flags;
6188 }
6189 }
6190 return rc;
6191}
6192
Michael Chan9f554592016-01-02 23:44:58 -05006193static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6194{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006195 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006196 int i = bnapi->index;
6197
Michael Chan3b2b7d92016-01-02 23:45:00 -05006198 if (!txr)
6199 return;
6200
Michael Chan9f554592016-01-02 23:44:58 -05006201 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6202 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6203 txr->tx_cons);
6204}
6205
6206static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6207{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006208 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006209 int i = bnapi->index;
6210
Michael Chan3b2b7d92016-01-02 23:45:00 -05006211 if (!rxr)
6212 return;
6213
Michael Chan9f554592016-01-02 23:44:58 -05006214 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6215 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6216 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6217 rxr->rx_sw_agg_prod);
6218}
6219
6220static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6221{
6222 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6223 int i = bnapi->index;
6224
6225 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6226 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6227}
6228
Michael Chanc0c050c2015-10-22 16:01:17 -04006229static void bnxt_dbg_dump_states(struct bnxt *bp)
6230{
6231 int i;
6232 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04006233
6234 for (i = 0; i < bp->cp_nr_rings; i++) {
6235 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04006236 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05006237 bnxt_dump_tx_sw_state(bnapi);
6238 bnxt_dump_rx_sw_state(bnapi);
6239 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04006240 }
6241 }
6242}
6243
Michael Chan6988bd92016-06-13 02:25:29 -04006244static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04006245{
Michael Chan6988bd92016-06-13 02:25:29 -04006246 if (!silent)
6247 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05006248 if (netif_running(bp->dev)) {
6249 bnxt_close_nic(bp, false, false);
6250 bnxt_open_nic(bp, false, false);
6251 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006252}
6253
6254static void bnxt_tx_timeout(struct net_device *dev)
6255{
6256 struct bnxt *bp = netdev_priv(dev);
6257
6258 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6259 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6260 schedule_work(&bp->sp_task);
6261}
6262
6263#ifdef CONFIG_NET_POLL_CONTROLLER
6264static void bnxt_poll_controller(struct net_device *dev)
6265{
6266 struct bnxt *bp = netdev_priv(dev);
6267 int i;
6268
6269 for (i = 0; i < bp->cp_nr_rings; i++) {
6270 struct bnxt_irq *irq = &bp->irq_tbl[i];
6271
6272 disable_irq(irq->vector);
6273 irq->handler(irq->vector, bp->bnapi[i]);
6274 enable_irq(irq->vector);
6275 }
6276}
6277#endif
6278
6279static void bnxt_timer(unsigned long data)
6280{
6281 struct bnxt *bp = (struct bnxt *)data;
6282 struct net_device *dev = bp->dev;
6283
6284 if (!netif_running(dev))
6285 return;
6286
6287 if (atomic_read(&bp->intr_sem) != 0)
6288 goto bnxt_restart_timer;
6289
Michael Chan3bdf56c2016-03-07 15:38:45 -05006290 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6291 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6292 schedule_work(&bp->sp_task);
6293 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006294bnxt_restart_timer:
6295 mod_timer(&bp->timer, jiffies + bp->current_interval);
6296}
6297
Michael Chan6988bd92016-06-13 02:25:29 -04006298/* Only called from bnxt_sp_task() */
6299static void bnxt_reset(struct bnxt *bp, bool silent)
6300{
6301 /* bnxt_reset_task() calls bnxt_close_nic() which waits
6302 * for BNXT_STATE_IN_SP_TASK to clear.
6303 * If there is a parallel dev_close(), bnxt_close() may be holding
6304	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6305 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6306 */
6307 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6308 rtnl_lock();
6309 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6310 bnxt_reset_task(bp, silent);
6311 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6312 rtnl_unlock();
6313}
6314
Michael Chanc0c050c2015-10-22 16:01:17 -04006315static void bnxt_cfg_ntp_filters(struct bnxt *);
6316
6317static void bnxt_sp_task(struct work_struct *work)
6318{
6319 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6320 int rc;
6321
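6321	/* Set IN_SP_TASK before testing OPEN: bnxt_close_nic() clears
6321	 * OPEN first and then waits for IN_SP_TASK to clear, so either
6321	 * it waits for this invocation or we bail out below.
6321	 */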
Michael Chan4cebdce2015-12-09 19:35:43 -05006322 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6323 smp_mb__after_atomic();
6324 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6325 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006326 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05006327 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006328
6329 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6330 bnxt_cfg_rx_mode(bp);
6331
6332 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6333 bnxt_cfg_ntp_filters(bp);
6334 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
Michael Chan286ef9d2016-11-16 21:13:08 -05006335 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6336 &bp->sp_event))
6337 bnxt_hwrm_phy_qcaps(bp);
6338
Michael Chanc0c050c2015-10-22 16:01:17 -04006339 rc = bnxt_update_link(bp, true);
6340 if (rc)
6341 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6342 rc);
6343 }
6344 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6345 bnxt_hwrm_exec_fwd_req(bp);
6346 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6347 bnxt_hwrm_tunnel_dst_port_alloc(
6348 bp, bp->vxlan_port,
6349 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6350 }
6351 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6352 bnxt_hwrm_tunnel_dst_port_free(
6353 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6354 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006355 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6356 bnxt_hwrm_tunnel_dst_port_alloc(
6357 bp, bp->nge_port,
6358 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6359 }
6360 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6361 bnxt_hwrm_tunnel_dst_port_free(
6362 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6363 }
Michael Chan6988bd92016-06-13 02:25:29 -04006364 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6365 bnxt_reset(bp, false);
Michael Chan4cebdce2015-12-09 19:35:43 -05006366
Michael Chanfc0f1922016-06-13 02:25:30 -04006367 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6368 bnxt_reset(bp, true);
6369
Michael Chan4bb13ab2016-04-05 14:09:01 -04006370 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
Michael Chan10289be2016-05-15 03:04:49 -04006371 bnxt_get_port_module_status(bp);
Michael Chan4bb13ab2016-04-05 14:09:01 -04006372
Michael Chan3bdf56c2016-03-07 15:38:45 -05006373 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6374 bnxt_hwrm_port_qstats(bp);
6375
Michael Chan4cebdce2015-12-09 19:35:43 -05006376 smp_mb__before_atomic();
6377 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006378}
6379
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_disable;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);

	spin_lock_init(&bp->ntp_fltr_lock);

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	/* tick values in microseconds */
	bp->rx_coal_ticks = 12;
	bp->rx_coal_bufs = 30;
	bp->rx_coal_ticks_irq = 1;
	bp->rx_coal_bufs_irq = 2;

	bp->tx_coal_ticks = 25;
	bp->tx_coal_bufs = 30;
	bp->tx_coal_ticks_irq = 2;
	bp->tx_coal_bufs_irq = 2;

	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;

	init_timer(&bp->timer);
	bp->timer.data = (unsigned long)bp;
	bp->timer.function = bnxt_timer;
	bp->current_interval = BNXT_TIMER_INTERVAL;

	clear_bit(BNXT_STATE_OPEN, &bp->state);

	return 0;

init_err_release:
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}

	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

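/* ndo_set_mac_address handler. bnxt_approve_mac() gives firmware a
 * chance to reject the new address first (a VF may have a MAC assigned
 * by the PF); changing the address on a running interface requires a
 * close/reopen.
 */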
/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	rc = bnxt_approve_mac(bp, addr->sa_data);
	if (rc)
		return rc;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

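/* Configure "tc" hardware traffic classes (mqprio, set from user space
 * with e.g. "tc qdisc add dev <if> root mqprio num_tc <n> ...").  The
 * TX ring count becomes tx_nr_rings_per_tc * tc, so the new total is
 * checked against the hardware maximum and reserved with firmware
 * before the device is reopened with the new ring layout.
 */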
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;

	if (tc > bp->max_tc) {
		netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	if (tc) {
		int max_rx_rings, max_tx_rings, req_tx_rings, rsv_tx_rings, rc;

		req_tx_rings = bp->tx_nr_rings_per_tc * tc;
		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
		if (rc || req_tx_rings > max_tx_rings)
			return -ENOMEM;

		rsv_tx_rings = req_tx_rings;
		if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings) ||
		    rsv_tx_rings < req_tx_rings)
			return -ENOMEM;
	}

	/* Need to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *ntc)
{
	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return bnxt_setup_mq_tc(dev, ntc->tc);
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

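/* ndo_rx_flow_steer handler (accelerated RFS): dissect the skb into
 * flow keys, return early if an equivalent filter already exists,
 * otherwise allocate an ID from the filter bitmap and defer the actual
 * HWRM filter programming to bnxt_sp_task().
 */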
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}

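/* Called from bnxt_sp_task(): program ntuple filters that are not yet
 * in hardware, and free the ones the RPS core reports as expired.
 */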
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Received PF driver unload event!\n");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

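/* udp_tunnel add/del notifiers. The hardware tracks one VXLAN and one
 * GENEVE destination port, so a per-port reference count is kept and
 * the HWRM tunnel port programming is deferred to bnxt_sp_task().
 */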
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
};

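/* PCI remove: tear down in roughly the reverse order of probe. SR-IOV
 * is disabled first so no VFs remain when the PF netdev goes away.
 */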
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_dcb_free(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
	kfree(bp->edev);
	bp->edev = NULL;
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

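/* Query PHY capabilities and the current link state at probe time,
 * then seed the ethtool link settings (autoneg, advertised speeds,
 * flow control) from what firmware reports out of NVM.
 */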
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* Initialize the ethtool settings copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

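/* Upper bound on usable IRQs, taken from the MSI-X table size in the
 * PCI MSI-X capability (the QSIZE field is encoded as N - 1).
 */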
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

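/* Raw per-function maxima from the PF or VF resource structure, capped
 * by ring groups, halved for RX when aggregation rings are in use, and
 * trimmed on Nitro A0 PFs where one completion ring and two RX rings
 * are set aside.
 */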
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

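/* Like bnxt_get_max_rings(), but on RoCE-capable devices a minimum
 * number of completion rings, IRQs, and stat contexts is set aside for
 * the RoCE driver before trimming the L2 ring counts.
 */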
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

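/* Pick default ring counts: completion rings are shared between RX and
 * TX, the RX/TX counts are capped by netif_get_num_default_rss_queues()
 * and the hardware maxima, and the TX rings are reserved with firmware
 * up front.
 */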
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	bool sh = true;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);

	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

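/* Log the negotiated PCIe link speed and width; useful when diagnosing
 * a NIC plugged into a slower or narrower slot than expected.
 */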
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

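/* PCI probe: map the device, establish HWRM communication with
 * firmware, query capabilities, set up netdev features and default
 * rings, then register the net device.
 */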
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;

	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = 9500;

	bnxt_dcb_init(bp);

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
		bp->gro_func = bnxt_gro_func_5731x;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	bnxt_hwrm_func_qcfg(bp);

	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	bnxt_set_dflt_rings(bp);

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
	    !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_clr_int;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_clr_int:
	bnxt_clear_int_mode(bp);

init_err:
	pci_iounmap(pdev, bp->bar0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

init_err_free:
	free_netdev(dev);
	return rc;
}

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);