/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET          (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET      NET_SKB_PAD
#define BNXT_RX_COPY_THRESH     256

#define BNXT_TX_PUSH_THRESH     164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM58802,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

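/* Completion ring doorbell primitives.  Writing RING_CMP(raw_cons) with
 * the appropriate key/flags acknowledges all completions up to raw_cons;
 * the REARM variant also re-enables the ring's IRQ.  A typical caller
 * (the NAPI poll loop later in the full file, not shown in this excerpt)
 * would do something like:
 *
 *      BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
 */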
#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)                                  \
                writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)                                        \
                writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

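/* TX length hint table, indexed by packet length >> 9 (see
 * bnxt_start_xmit() below).  The hint written into the BD gives the
 * hardware a rough size class for the frame; everything of 2KB or more
 * shares the largest bucket.
 */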
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

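/* For packets transmitted on behalf of a VF representor, the skb carries
 * a METADATA_HW_PORT_MUX metadata dst holding the hardware port ID of
 * the VF.  That ID is returned here and programmed into the BD as the
 * CFA action so the embedded switch forwards the frame to the right
 * function; ordinary PF traffic returns 0.
 */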
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

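/* Main transmit routine.  A small packet (up to tx_push_thresh bytes)
 * sent on an otherwise empty ring is "pushed": the BDs and the payload
 * are copied straight through the doorbell BAR with __iowrite64_copy(),
 * saving the hardware a DMA read.  All other packets take the normal_tx
 * path, which DMA-maps the linear head and each fragment into a chain
 * of long TX BDs and then rings the doorbell.
 */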
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                    TX_BD_TYPE_LONG_TX_BD |
                                    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                    TX_BD_FLAGS_COAL_NOW |
                                    TX_BD_FLAGS_PACKET_END |
                                    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
                        __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

        mmiowb();

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (skb->xmit_more && !tx_buf->is_push)
                        bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

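/* TX completion processing, called from NAPI poll: for each completed
 * packet, unmap the head and fragment buffers (push-mode packets have
 * nothing mapped), free the skb, and wake the queue if it was stopped
 * and enough descriptors are free again.
 */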
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

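/* Allocate and map a full page for an RX slot (page mode, used when an
 * XDP program is attached).  The mapping returned to the caller is
 * advanced by rx_dma_offset so the NIC writes past the headroom that is
 * reserved for XDP.
 */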
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = alloc_page(gfp);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                __free_page(page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

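/* Refill the RX ring slot at @prod: a whole page in page mode, otherwise
 * a kmalloc'd buffer.  data_ptr is left pointing at the packet area,
 * i.e. rx_offset bytes past the start of the buffer.
 */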
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

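/* Post one buffer on the aggregation ring, which supplies the tail
 * buffers for jumbo and TPA packets.  When PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE, a single system page is carved into several
 * aggregation buffers and reference-counted with get_page().
 */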
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                                   u32 agg_bufs)
{
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

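/* Build an skb for a packet received into a page (page mode).  The
 * header portion (the payload hint from the completion, or
 * eth_get_headlen() when the hint is zero) is copied into the skb head;
 * the remainder of the page is attached as the first frag.
 */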
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        struct skb_frag_struct *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);

        if (unlikely(!payload))
                payload = eth_get_headlen(data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        frag->page_offset += payload;
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

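/* Attach the aggregation buffers of a jumbo or TPA packet to @skb as
 * page frags.  If refilling the aggregation ring fails midway, the skb
 * is dropped and all remaining buffers are recycled back onto the ring.
 */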
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                                     struct sk_buff *skb, u16 cp_cons,
                                     u32 agg_bufs)
{
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u32 *raw_cons, void *cmp)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                            RX_TPA_END_CMP_AGG_BUFS) >>
                           RX_TPA_END_CMP_AGG_BUFS_SHIFT;
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                schedule_work(&bp->sp_task);
        }
        rxr->rx_next_cons = 0xffff;
}

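/* TPA (transparent packet aggregation) is the hardware LRO/GRO engine.
 * A TPA_START completion parks the receive buffer in rxr->rx_tpa[agg_id]
 * and immediately recycles a buffer into the RX ring in its place; the
 * skb for the aggregated packet is only built when TPA_END arrives.
 */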
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        u8 agg_id = TPA_START_AGG_ID(tpa_start);
        u16 cons, prod;
        struct bnxt_tpa_info *tpa_info;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons)) {
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3)
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u16 cp_cons, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

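/* Per-chip GRO fixup helpers.  The hardware coalesces the TCP payload
 * but leaves the headers as received, so before handing the merged
 * packet to the stack these functions locate the inner TCP header,
 * rewrite the pseudo-header checksum, and set the tunnel GSO type when
 * the flow is UDP-encapsulated.
 */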
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract all offsets by 4 */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                struct udphdr *uh = NULL;
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                if (proto == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off, tcp_opt_len = 0;

        if (tcp_ts)
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
                                           struct bnxt_tpa_info *tpa_info,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        int payload_off;
        u16 segs;

        segs = TPA_END_TPA_SEGS(tpa_end);
        if (segs == 1)
                return skb;

        NAPI_GRO_CB(skb)->count = segs;
        skb_shinfo(skb)->gso_size =
                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
        payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
        if (likely(skb))
                tcp_gro_complete(skb);
#endif
        return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
        struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

        /* if vf-rep dev is NULL, the packet must belong to the PF */
        return dev ? dev : bp->dev;
}

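/* TPA_END handling: combine the buffer parked at TPA_START with any
 * aggregation pages into one large skb, apply VLAN/checksum/RSS results
 * from the completion, and run the GRO fixup if the chip marked the
 * aggregation as GRO.
 */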
Michael Chanc0c050c2015-10-22 16:01:17 -04001305static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1306 struct bnxt_napi *bnapi,
1307 u32 *raw_cons,
1308 struct rx_tpa_end_cmp *tpa_end,
1309 struct rx_tpa_end_cmp_ext *tpa_end1,
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001310 u8 *event)
Michael Chanc0c050c2015-10-22 16:01:17 -04001311{
1312 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
Michael Chanb6ab4b02016-01-02 23:44:59 -05001313 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001314 u8 agg_id = TPA_END_AGG_ID(tpa_end);
Michael Chan6bb19472017-02-06 16:55:32 -05001315 u8 *data_ptr, agg_bufs;
Michael Chanc0c050c2015-10-22 16:01:17 -04001316 u16 cp_cons = RING_CMP(*raw_cons);
1317 unsigned int len;
1318 struct bnxt_tpa_info *tpa_info;
1319 dma_addr_t mapping;
1320 struct sk_buff *skb;
Michael Chan6bb19472017-02-06 16:55:32 -05001321 void *data;
Michael Chanc0c050c2015-10-22 16:01:17 -04001322
Michael Chanfa7e2812016-05-10 19:18:00 -04001323 if (unlikely(bnapi->in_reset)) {
1324 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1325
1326 if (rc < 0)
1327 return ERR_PTR(-EBUSY);
1328 return NULL;
1329 }
1330
Michael Chanc0c050c2015-10-22 16:01:17 -04001331 tpa_info = &rxr->rx_tpa[agg_id];
1332 data = tpa_info->data;
Michael Chan6bb19472017-02-06 16:55:32 -05001333 data_ptr = tpa_info->data_ptr;
1334 prefetch(data_ptr);
Michael Chanc0c050c2015-10-22 16:01:17 -04001335 len = tpa_info->len;
1336 mapping = tpa_info->mapping;
1337
1338 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1339 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1340
1341 if (agg_bufs) {
1342 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1343 return ERR_PTR(-EBUSY);
1344
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001345 *event |= BNXT_AGG_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001346 cp_cons = NEXT_CMP(cp_cons);
1347 }
1348
Michael Chan69c149e2017-06-23 14:01:00 -04001349 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001350 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
Michael Chan69c149e2017-06-23 14:01:00 -04001351 if (agg_bufs > MAX_SKB_FRAGS)
1352 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1353 agg_bufs, (int)MAX_SKB_FRAGS);
Michael Chanc0c050c2015-10-22 16:01:17 -04001354 return NULL;
1355 }
1356
1357 if (len <= bp->rx_copy_thresh) {
Michael Chan6bb19472017-02-06 16:55:32 -05001358 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
Michael Chanc0c050c2015-10-22 16:01:17 -04001359 if (!skb) {
1360 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1361 return NULL;
1362 }
1363 } else {
1364 u8 *new_data;
1365 dma_addr_t new_mapping;
1366
1367 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1368 if (!new_data) {
1369 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1370 return NULL;
1371 }
1372
1373 tpa_info->data = new_data;
Michael Chanb3dba772017-02-06 16:55:35 -05001374 tpa_info->data_ptr = new_data + bp->rx_offset;
Michael Chanc0c050c2015-10-22 16:01:17 -04001375 tpa_info->mapping = new_mapping;
1376
1377 skb = build_skb(data, 0);
Shannon Nelsonc519fe92017-05-09 18:30:12 -07001378 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1379 bp->rx_buf_use_size, bp->rx_dir,
1380 DMA_ATTR_WEAK_ORDERING);
Michael Chanc0c050c2015-10-22 16:01:17 -04001381
1382 if (!skb) {
1383 kfree(data);
1384 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1385 return NULL;
1386 }
Michael Chanb3dba772017-02-06 16:55:35 -05001387 skb_reserve(skb, bp->rx_offset);
Michael Chanc0c050c2015-10-22 16:01:17 -04001388 skb_put(skb, len);
1389 }
1390
1391 if (agg_bufs) {
1392 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1393 if (!skb) {
1394 /* Page reuse already handled by bnxt_rx_pages(). */
1395 return NULL;
1396 }
1397 }
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001398
1399 skb->protocol =
1400 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
Michael Chanc0c050c2015-10-22 16:01:17 -04001401
1402 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1403 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1404
Michael Chan8852ddb2016-06-06 02:37:16 -04001405 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1406 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001407 u16 vlan_proto = tpa_info->metadata >>
1408 RX_CMP_FLAGS2_METADATA_TPID_SFT;
Michael Chan8852ddb2016-06-06 02:37:16 -04001409 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04001410
Michael Chan8852ddb2016-06-06 02:37:16 -04001411 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
Michael Chanc0c050c2015-10-22 16:01:17 -04001412 }
1413
1414 skb_checksum_none_assert(skb);
1415 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1416 skb->ip_summed = CHECKSUM_UNNECESSARY;
1417 skb->csum_level =
1418 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1419 }
1420
1421 if (TPA_END_GRO(tpa_end))
Michael Chan309369c2016-06-13 02:25:34 -04001422 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001423
1424 return skb;
1425}
1426
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001427static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1428 struct sk_buff *skb)
1429{
1430 if (skb->dev != bp->dev) {
1431 /* this packet belongs to a vf-rep */
1432 bnxt_vf_rep_rx(bp, skb);
1433 return;
1434 }
1435 skb_record_rx_queue(skb, bnapi->index);
1436 napi_gro_receive(&bnapi->napi, skb);
1437}
1438
Michael Chanc0c050c2015-10-22 16:01:17 -04001439/* returns the following:
1440 * 1 - 1 packet successfully received
1441 * 0 - successful TPA_START, packet not completed yet
1442 * -EBUSY - completion ring does not have all the agg buffers yet
1443 * -ENOMEM - packet aborted due to out of memory
1444 * -EIO - packet aborted due to hw error indicated in BD
1445 */
1446static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001447 u8 *event)
Michael Chanc0c050c2015-10-22 16:01:17 -04001448{
1449 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
Michael Chanb6ab4b02016-01-02 23:44:59 -05001450 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001451 struct net_device *dev = bp->dev;
1452 struct rx_cmp *rxcmp;
1453 struct rx_cmp_ext *rxcmp1;
1454 u32 tmp_raw_cons = *raw_cons;
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001455 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04001456 struct bnxt_sw_rx_bd *rx_buf;
1457 unsigned int len;
Michael Chan6bb19472017-02-06 16:55:32 -05001458 u8 *data_ptr, agg_bufs, cmp_type;
Michael Chanc0c050c2015-10-22 16:01:17 -04001459 dma_addr_t dma_addr;
1460 struct sk_buff *skb;
Michael Chan6bb19472017-02-06 16:55:32 -05001461 void *data;
Michael Chanc0c050c2015-10-22 16:01:17 -04001462 int rc = 0;
Michael Chanc61fb992017-02-06 16:55:36 -05001463 u32 misc;
Michael Chanc0c050c2015-10-22 16:01:17 -04001464
1465 rxcmp = (struct rx_cmp *)
1466 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1467
1468 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1469 cp_cons = RING_CMP(tmp_raw_cons);
1470 rxcmp1 = (struct rx_cmp_ext *)
1471 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1472
1473 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1474 return -EBUSY;
1475
1476 cmp_type = RX_CMP_TYPE(rxcmp);
1477
1478 prod = rxr->rx_prod;
1479
1480 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1481 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1482 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1483
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001484 *event |= BNXT_RX_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001485 goto next_rx_no_prod;
1486
1487 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1488 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1489 (struct rx_tpa_end_cmp *)rxcmp,
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001490 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001491
1492 if (unlikely(IS_ERR(skb)))
1493 return -EBUSY;
1494
1495 rc = -ENOMEM;
1496 if (likely(skb)) {
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001497 bnxt_deliver_skb(bp, bnapi, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001498 rc = 1;
1499 }
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001500 *event |= BNXT_RX_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001501 goto next_rx_no_prod;
1502 }
1503
1504 cons = rxcmp->rx_cmp_opaque;
1505 rx_buf = &rxr->rx_buf_ring[cons];
1506 data = rx_buf->data;
Michael Chan6bb19472017-02-06 16:55:32 -05001507 data_ptr = rx_buf->data_ptr;
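	/* The opaque field in the completion must match our next
	 * consumer index; if not, the rx ring is out of sync with the
	 * hardware and a reset is scheduled.
	 */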
Michael Chanfa7e2812016-05-10 19:18:00 -04001508 if (unlikely(cons != rxr->rx_next_cons)) {
1509 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1510
1511 bnxt_sched_reset(bp, rxr);
1512 return rc1;
1513 }
Michael Chan6bb19472017-02-06 16:55:32 -05001514 prefetch(data_ptr);
Michael Chanc0c050c2015-10-22 16:01:17 -04001515
Michael Chanc61fb992017-02-06 16:55:36 -05001516 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1517 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001518
1519 if (agg_bufs) {
1520 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1521 return -EBUSY;
1522
1523 cp_cons = NEXT_CMP(cp_cons);
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001524 *event |= BNXT_AGG_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001525 }
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001526 *event |= BNXT_RX_EVENT;
Michael Chanc0c050c2015-10-22 16:01:17 -04001527
1528 rx_buf->data = NULL;
1529 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1530 bnxt_reuse_rx_data(rxr, cons, data);
1531 if (agg_bufs)
1532 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1533
1534 rc = -EIO;
1535 goto next_rx;
1536 }
1537
1538 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
Michael Chan11cd1192017-02-06 16:55:33 -05001539 dma_addr = rx_buf->mapping;
Michael Chanc0c050c2015-10-22 16:01:17 -04001540
Michael Chanc6d30e82017-02-06 16:55:42 -05001541 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1542 rc = 1;
1543 goto next_rx;
1544 }
1545
Michael Chanc0c050c2015-10-22 16:01:17 -04001546 if (len <= bp->rx_copy_thresh) {
Michael Chan6bb19472017-02-06 16:55:32 -05001547 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
Michael Chanc0c050c2015-10-22 16:01:17 -04001548 bnxt_reuse_rx_data(rxr, cons, data);
1549 if (!skb) {
1550 rc = -ENOMEM;
1551 goto next_rx;
1552 }
1553 } else {
Michael Chanc61fb992017-02-06 16:55:36 -05001554 u32 payload;
1555
Michael Chanc6d30e82017-02-06 16:55:42 -05001556 if (rx_buf->data_ptr == data_ptr)
1557 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1558 else
1559 payload = 0;
Michael Chan6bb19472017-02-06 16:55:32 -05001560 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
Michael Chanc61fb992017-02-06 16:55:36 -05001561 payload | len);
Michael Chanc0c050c2015-10-22 16:01:17 -04001562 if (!skb) {
1563 rc = -ENOMEM;
1564 goto next_rx;
1565 }
1566 }
1567
1568 if (agg_bufs) {
1569 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1570 if (!skb) {
1571 rc = -ENOMEM;
1572 goto next_rx;
1573 }
1574 }
1575
1576 if (RX_CMP_HASH_VALID(rxcmp)) {
1577 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1578 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1579
1580 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1581 if (hash_type != 1 && hash_type != 3)
1582 type = PKT_HASH_TYPE_L3;
1583 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1584 }
1585
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001586 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1587 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
Michael Chanc0c050c2015-10-22 16:01:17 -04001588
Michael Chan8852ddb2016-06-06 02:37:16 -04001589 if ((rxcmp1->rx_cmp_flags2 &
1590 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1591 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04001592 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
Michael Chan8852ddb2016-06-06 02:37:16 -04001593 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04001594 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1595
Michael Chan8852ddb2016-06-06 02:37:16 -04001596 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
Michael Chanc0c050c2015-10-22 16:01:17 -04001597 }
1598
1599 skb_checksum_none_assert(skb);
1600 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1601 if (dev->features & NETIF_F_RXCSUM) {
1602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1604 }
1605 } else {
Satish Baddipadige665e3502015-12-27 18:19:21 -05001606 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1607 if (dev->features & NETIF_F_RXCSUM)
1608 cpr->rx_l4_csum_errors++;
1609 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001610 }
1611
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04001612 bnxt_deliver_skb(bp, bnapi, skb);
Michael Chanc0c050c2015-10-22 16:01:17 -04001613 rc = 1;
1614
1615next_rx:
1616 rxr->rx_prod = NEXT_RX(prod);
Michael Chan376a5b82016-05-10 19:17:59 -04001617 rxr->rx_next_cons = NEXT_RX(cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04001618
1619next_rx_no_prod:
1620 *raw_cons = tmp_raw_cons;
1621
1622 return rc;
1623}
1624
Michael Chan2270bc52017-06-23 14:01:01 -04001625/* In netpoll mode, if we are using a combined completion ring, we need to
 1626 * discard the rx packets and recycle the buffers.  We do that by
      * flagging each completion with an error so that bnxt_rx_pkt()
      * takes the error path, drops the packet, and recycles the buffers.
 1627 */
1628static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1629 u32 *raw_cons, u8 *event)
1630{
1631 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1632 u32 tmp_raw_cons = *raw_cons;
1633 struct rx_cmp_ext *rxcmp1;
1634 struct rx_cmp *rxcmp;
1635 u16 cp_cons;
1636 u8 cmp_type;
1637
1638 cp_cons = RING_CMP(tmp_raw_cons);
1639 rxcmp = (struct rx_cmp *)
1640 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1641
1642 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1643 cp_cons = RING_CMP(tmp_raw_cons);
1644 rxcmp1 = (struct rx_cmp_ext *)
1645 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1646
1647 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1648 return -EBUSY;
1649
1650 cmp_type = RX_CMP_TYPE(rxcmp);
1651 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1652 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1653 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1654 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1655 struct rx_tpa_end_cmp_ext *tpa_end1;
1656
1657 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1658 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1659 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1660 }
1661 return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1662}
1663
Michael Chan4bb13ab2016-04-05 14:09:01 -04001664#define BNXT_GET_EVENT_PORT(data) \
Michael Chan87c374d2016-12-02 21:17:16 -05001665 ((data) & \
1666 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
Michael Chan4bb13ab2016-04-05 14:09:01 -04001667
Michael Chanc0c050c2015-10-22 16:01:17 -04001668static int bnxt_async_event_process(struct bnxt *bp,
1669 struct hwrm_async_event_cmpl *cmpl)
1670{
1671 u16 event_id = le16_to_cpu(cmpl->event_id);
1672
 1673	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1674 switch (event_id) {
Michael Chan87c374d2016-12-02 21:17:16 -05001675 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
Michael Chan8cbde112016-04-11 04:11:14 -04001676 u32 data1 = le32_to_cpu(cmpl->event_data1);
1677 struct bnxt_link_info *link_info = &bp->link_info;
1678
1679 if (BNXT_VF(bp))
1680 goto async_event_process_exit;
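		/* Bit 0x20000 of event_data1 appears to indicate that
		 * the forced link speed is no longer supported; there is
		 * no named constant for it in this HSI revision.
		 */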
1681 if (data1 & 0x20000) {
1682 u16 fw_speed = link_info->force_link_speed;
1683 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1684
1685 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1686 speed);
1687 }
Michael Chan286ef9d2016-11-16 21:13:08 -05001688 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
Michael Chan8cbde112016-04-11 04:11:14 -04001689		/* fall through */
1690 }
Michael Chan87c374d2016-12-02 21:17:16 -05001691 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
Michael Chanc0c050c2015-10-22 16:01:17 -04001692 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
Jeffrey Huang19241362016-02-26 04:00:00 -05001693 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001694 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
Jeffrey Huang19241362016-02-26 04:00:00 -05001695 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001696 break;
Michael Chan87c374d2016-12-02 21:17:16 -05001697 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
Michael Chan4bb13ab2016-04-05 14:09:01 -04001698 u32 data1 = le32_to_cpu(cmpl->event_data1);
1699 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1700
1701 if (BNXT_VF(bp))
1702 break;
1703
1704 if (bp->pf.port_id != port_id)
1705 break;
1706
Michael Chan4bb13ab2016-04-05 14:09:01 -04001707 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1708 break;
1709 }
Michael Chan87c374d2016-12-02 21:17:16 -05001710 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
Michael Chanfc0f1922016-06-13 02:25:30 -04001711 if (BNXT_PF(bp))
1712 goto async_event_process_exit;
1713 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1714 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001715 default:
Jeffrey Huang19241362016-02-26 04:00:00 -05001716 goto async_event_process_exit;
Michael Chanc0c050c2015-10-22 16:01:17 -04001717 }
Jeffrey Huang19241362016-02-26 04:00:00 -05001718 schedule_work(&bp->sp_task);
1719async_event_process_exit:
Michael Chana588e452016-12-07 00:26:21 -05001720 bnxt_ulp_async_events(bp, cmpl);
Michael Chanc0c050c2015-10-22 16:01:17 -04001721 return 0;
1722}
1723
1724static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1725{
1726 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1727 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1728 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1729 (struct hwrm_fwd_req_cmpl *)txcmp;
1730
1731 switch (cmpl_type) {
1732 case CMPL_BASE_TYPE_HWRM_DONE:
1733 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1734 if (seq_id == bp->hwrm_intr_seq_id)
1735 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1736 else
1737 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1738 break;
1739
1740 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1741 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1742
1743 if ((vf_id < bp->pf.first_vf_id) ||
1744 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1745 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1746 vf_id);
1747 return -EINVAL;
1748 }
1749
1750 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1751 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1752 schedule_work(&bp->sp_task);
1753 break;
1754
1755 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1756 bnxt_async_event_process(bp,
1757 (struct hwrm_async_event_cmpl *)txcmp);
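		/* fall through to default */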
1758
1759 default:
1760 break;
1761 }
1762
1763 return 0;
1764}
1765
1766static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1767{
1768 struct bnxt_napi *bnapi = dev_instance;
1769 struct bnxt *bp = bnapi->bp;
1770 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1771 u32 cons = RING_CMP(cpr->cp_raw_cons);
1772
1773 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1774 napi_schedule(&bnapi->napi);
1775 return IRQ_HANDLED;
1776}
1777
1778static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1779{
1780 u32 raw_cons = cpr->cp_raw_cons;
1781 u16 cons = RING_CMP(raw_cons);
1782 struct tx_cmp *txcmp;
1783
1784 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1785
1786 return TX_CMP_VALID(txcmp, raw_cons);
1787}
1788
Michael Chanc0c050c2015-10-22 16:01:17 -04001789static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1790{
1791 struct bnxt_napi *bnapi = dev_instance;
1792 struct bnxt *bp = bnapi->bp;
1793 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1794 u32 cons = RING_CMP(cpr->cp_raw_cons);
1795 u32 int_status;
1796
1797 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1798
1799 if (!bnxt_has_work(bp, cpr)) {
Jeffrey Huang11809492015-11-05 16:25:49 -05001800 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
Michael Chanc0c050c2015-10-22 16:01:17 -04001801		/* return if the interrupt is spurious (not for this ring) */
1802 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1803 return IRQ_NONE;
1804 }
1805
1806 /* disable ring IRQ */
1807 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1808
1809 /* Return here if interrupt is shared and is disabled. */
1810 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1811 return IRQ_HANDLED;
1812
1813 napi_schedule(&bnapi->napi);
1814 return IRQ_HANDLED;
1815}
1816
1817static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1818{
1819 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1820 u32 raw_cons = cpr->cp_raw_cons;
1821 u32 cons;
1822 int tx_pkts = 0;
1823 int rx_pkts = 0;
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001824 u8 event = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04001825 struct tx_cmp *txcmp;
1826
1827 while (1) {
1828 int rc;
1829
1830 cons = RING_CMP(raw_cons);
1831 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1832
1833 if (!TX_CMP_VALID(txcmp, raw_cons))
1834 break;
1835
Michael Chan67a95e22016-05-04 16:56:43 -04001836		/* The validity of the entry must be checked before
1837 * reading any further.
1838 */
Michael Chanb67daab2016-05-15 03:04:51 -04001839 dma_rmb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001840 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1841 tx_pkts++;
 1842			/* return the full budget so NAPI polls again to finish the tx work */
1843 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1844 rx_pkts = budget;
1845 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
Michael Chan2270bc52017-06-23 14:01:01 -04001846 if (likely(budget))
1847 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1848 else
1849 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1850 &event);
Michael Chanc0c050c2015-10-22 16:01:17 -04001851 if (likely(rc >= 0))
1852 rx_pkts += rc;
Michael Chan903649e2017-08-28 13:40:30 -04001853 /* Increment rx_pkts when rc is -ENOMEM to count towards
1854 * the NAPI budget. Otherwise, we may potentially loop
1855 * here forever if we consistently cannot allocate
1856 * buffers.
1857 */
1858 else if (rc == -ENOMEM)
1859 rx_pkts++;
Michael Chanc0c050c2015-10-22 16:01:17 -04001860 else if (rc == -EBUSY) /* partial completion */
1861 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001862 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1863 CMPL_BASE_TYPE_HWRM_DONE) ||
1864 (TX_CMP_TYPE(txcmp) ==
1865 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1866 (TX_CMP_TYPE(txcmp) ==
1867 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1868 bnxt_hwrm_handler(bp, txcmp);
1869 }
1870 raw_cons = NEXT_RAW_CMP(raw_cons);
1871
1872 if (rx_pkts == budget)
1873 break;
1874 }
1875
Michael Chan38413402017-02-06 16:55:43 -05001876 if (event & BNXT_TX_EVENT) {
1877 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1878 void __iomem *db = txr->tx_doorbell;
1879 u16 prod = txr->tx_prod;
1880
1881 /* Sync BD data before updating doorbell */
1882 wmb();
1883
Michael Chan434c9752017-05-29 19:06:08 -04001884 bnxt_db_write(bp, db, DB_KEY_TX | prod);
Michael Chan38413402017-02-06 16:55:43 -05001885 }
1886
Michael Chanc0c050c2015-10-22 16:01:17 -04001887 cpr->cp_raw_cons = raw_cons;
1888 /* ACK completion ring before freeing tx ring and producing new
1889 * buffers in rx/agg rings to prevent overflowing the completion
1890 * ring.
1891 */
1892 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1893
1894 if (tx_pkts)
Michael Chanfa3e93e2017-02-06 16:55:41 -05001895 bnapi->tx_int(bp, bnapi, tx_pkts);
Michael Chanc0c050c2015-10-22 16:01:17 -04001896
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001897 if (event & BNXT_RX_EVENT) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001898 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001899
Michael Chan434c9752017-05-29 19:06:08 -04001900 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1901 if (event & BNXT_AGG_EVENT)
1902 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1903 DB_KEY_RX | rxr->rx_agg_prod);
Michael Chanc0c050c2015-10-22 16:01:17 -04001904 }
1905 return rx_pkts;
1906}
1907
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001908static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1909{
1910 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1911 struct bnxt *bp = bnapi->bp;
1912 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1913 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1914 struct tx_cmp *txcmp;
1915 struct rx_cmp_ext *rxcmp1;
1916 u32 cp_cons, tmp_raw_cons;
1917 u32 raw_cons = cpr->cp_raw_cons;
1918 u32 rx_pkts = 0;
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001919 u8 event = 0;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001920
1921 while (1) {
1922 int rc;
1923
1924 cp_cons = RING_CMP(raw_cons);
1925 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1926
1927 if (!TX_CMP_VALID(txcmp, raw_cons))
1928 break;
1929
1930 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1931 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1932 cp_cons = RING_CMP(tmp_raw_cons);
1933 rxcmp1 = (struct rx_cmp_ext *)
1934 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1935
1936 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1937 break;
1938
1939 /* force an error to recycle the buffer */
1940 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1941 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1942
Michael Chan4e5dbbda2017-02-06 16:55:37 -05001943 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001944 if (likely(rc == -EIO))
1945 rx_pkts++;
1946 else if (rc == -EBUSY) /* partial completion */
1947 break;
1948 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1949 CMPL_BASE_TYPE_HWRM_DONE)) {
1950 bnxt_hwrm_handler(bp, txcmp);
1951 } else {
1952 netdev_err(bp->dev,
1953 "Invalid completion received on special ring\n");
1954 }
1955 raw_cons = NEXT_RAW_CMP(raw_cons);
1956
1957 if (rx_pkts == budget)
1958 break;
1959 }
1960
1961 cpr->cp_raw_cons = raw_cons;
1962 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
Michael Chan434c9752017-05-29 19:06:08 -04001963 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001964
Michael Chan434c9752017-05-29 19:06:08 -04001965 if (event & BNXT_AGG_EVENT)
1966 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1967 DB_KEY_RX | rxr->rx_agg_prod);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001968
1969 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
Eric Dumazet6ad20162017-01-30 08:22:01 -08001970 napi_complete_done(napi, rx_pkts);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001971 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1972 }
1973 return rx_pkts;
1974}
1975
Michael Chanc0c050c2015-10-22 16:01:17 -04001976static int bnxt_poll(struct napi_struct *napi, int budget)
1977{
1978 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1979 struct bnxt *bp = bnapi->bp;
1980 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1981 int work_done = 0;
1982
Michael Chanc0c050c2015-10-22 16:01:17 -04001983 while (1) {
1984 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1985
1986 if (work_done >= budget)
1987 break;
1988
1989 if (!bnxt_has_work(bp, cpr)) {
Michael Chane7b95692016-12-29 12:13:32 -05001990 if (napi_complete_done(napi, work_done))
1991 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1992 cpr->cp_raw_cons);
Michael Chanc0c050c2015-10-22 16:01:17 -04001993 break;
1994 }
1995 }
1996 mmiowb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001997 return work_done;
1998}
1999
Michael Chanc0c050c2015-10-22 16:01:17 -04002000static void bnxt_free_tx_skbs(struct bnxt *bp)
2001{
2002 int i, max_idx;
2003 struct pci_dev *pdev = bp->pdev;
2004
Michael Chanb6ab4b02016-01-02 23:44:59 -05002005 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002006 return;
2007
2008 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2009 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002010 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002011 int j;
2012
Michael Chanc0c050c2015-10-22 16:01:17 -04002013 for (j = 0; j < max_idx;) {
2014 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2015 struct sk_buff *skb = tx_buf->skb;
2016 int k, last;
2017
2018 if (!skb) {
2019 j++;
2020 continue;
2021 }
2022
2023 tx_buf->skb = NULL;
2024
2025 if (tx_buf->is_push) {
2026 dev_kfree_skb(skb);
2027 j += 2;
2028 continue;
2029 }
2030
2031 dma_unmap_single(&pdev->dev,
2032 dma_unmap_addr(tx_buf, mapping),
2033 skb_headlen(skb),
2034 PCI_DMA_TODEVICE);
2035
2036 last = tx_buf->nr_frags;
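			/* The skb head occupied two BD slots (the long
			 * tx_bd plus tx_bd_ext), so skip both before
			 * walking the frag BDs.
			 */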
2037 j += 2;
Michael Chand612a572016-01-28 03:11:22 -05002038 for (k = 0; k < last; k++, j++) {
2039 int ring_idx = j & bp->tx_ring_mask;
Michael Chanc0c050c2015-10-22 16:01:17 -04002040 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2041
Michael Chand612a572016-01-28 03:11:22 -05002042 tx_buf = &txr->tx_buf_ring[ring_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04002043 dma_unmap_page(
2044 &pdev->dev,
2045 dma_unmap_addr(tx_buf, mapping),
2046 skb_frag_size(frag), PCI_DMA_TODEVICE);
2047 }
2048 dev_kfree_skb(skb);
2049 }
2050 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2051 }
2052}
2053
2054static void bnxt_free_rx_skbs(struct bnxt *bp)
2055{
2056 int i, max_idx, max_agg_idx;
2057 struct pci_dev *pdev = bp->pdev;
2058
Michael Chanb6ab4b02016-01-02 23:44:59 -05002059 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002060 return;
2061
2062 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2063 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2064 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002065 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002066 int j;
2067
Michael Chanc0c050c2015-10-22 16:01:17 -04002068 if (rxr->rx_tpa) {
2069 for (j = 0; j < MAX_TPA; j++) {
2070 struct bnxt_tpa_info *tpa_info =
2071 &rxr->rx_tpa[j];
2072 u8 *data = tpa_info->data;
2073
2074 if (!data)
2075 continue;
2076
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002077 dma_unmap_single_attrs(&pdev->dev,
2078 tpa_info->mapping,
2079 bp->rx_buf_use_size,
2080 bp->rx_dir,
2081 DMA_ATTR_WEAK_ORDERING);
Michael Chanc0c050c2015-10-22 16:01:17 -04002082
2083 tpa_info->data = NULL;
2084
2085 kfree(data);
2086 }
2087 }
2088
2089 for (j = 0; j < max_idx; j++) {
2090 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
Michael Chan3ed3a832017-03-28 19:47:31 -04002091 dma_addr_t mapping = rx_buf->mapping;
Michael Chan6bb19472017-02-06 16:55:32 -05002092 void *data = rx_buf->data;
Michael Chanc0c050c2015-10-22 16:01:17 -04002093
2094 if (!data)
2095 continue;
2096
Michael Chanc0c050c2015-10-22 16:01:17 -04002097 rx_buf->data = NULL;
2098
Michael Chan3ed3a832017-03-28 19:47:31 -04002099 if (BNXT_RX_PAGE_MODE(bp)) {
2100 mapping -= bp->rx_dma_offset;
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002101 dma_unmap_page_attrs(&pdev->dev, mapping,
2102 PAGE_SIZE, bp->rx_dir,
2103 DMA_ATTR_WEAK_ORDERING);
Michael Chanc61fb992017-02-06 16:55:36 -05002104 __free_page(data);
Michael Chan3ed3a832017-03-28 19:47:31 -04002105 } else {
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002106 dma_unmap_single_attrs(&pdev->dev, mapping,
2107 bp->rx_buf_use_size,
2108 bp->rx_dir,
2109 DMA_ATTR_WEAK_ORDERING);
Michael Chanc61fb992017-02-06 16:55:36 -05002110 kfree(data);
Michael Chan3ed3a832017-03-28 19:47:31 -04002111 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002112 }
2113
2114 for (j = 0; j < max_agg_idx; j++) {
2115 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2116 &rxr->rx_agg_ring[j];
2117 struct page *page = rx_agg_buf->page;
2118
2119 if (!page)
2120 continue;
2121
Shannon Nelsonc519fe92017-05-09 18:30:12 -07002122 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2123 BNXT_RX_PAGE_SIZE,
2124 PCI_DMA_FROMDEVICE,
2125 DMA_ATTR_WEAK_ORDERING);
Michael Chanc0c050c2015-10-22 16:01:17 -04002126
2127 rx_agg_buf->page = NULL;
2128 __clear_bit(j, rxr->rx_agg_bmap);
2129
2130 __free_page(page);
2131 }
Michael Chan89d0a062016-04-25 02:30:51 -04002132 if (rxr->rx_page) {
2133 __free_page(rxr->rx_page);
2134 rxr->rx_page = NULL;
2135 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002136 }
2137}
2138
2139static void bnxt_free_skbs(struct bnxt *bp)
2140{
2141 bnxt_free_tx_skbs(bp);
2142 bnxt_free_rx_skbs(bp);
2143}
2144
2145static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2146{
2147 struct pci_dev *pdev = bp->pdev;
2148 int i;
2149
2150 for (i = 0; i < ring->nr_pages; i++) {
2151 if (!ring->pg_arr[i])
2152 continue;
2153
2154 dma_free_coherent(&pdev->dev, ring->page_size,
2155 ring->pg_arr[i], ring->dma_arr[i]);
2156
2157 ring->pg_arr[i] = NULL;
2158 }
2159 if (ring->pg_tbl) {
2160 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2161 ring->pg_tbl, ring->pg_tbl_map);
2162 ring->pg_tbl = NULL;
2163 }
2164 if (ring->vmem_size && *ring->vmem) {
2165 vfree(*ring->vmem);
2166 *ring->vmem = NULL;
2167 }
2168}
2169
2170static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2171{
2172 int i;
2173 struct pci_dev *pdev = bp->pdev;
2174
2175 if (ring->nr_pages > 1) {
2176 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2177 ring->nr_pages * 8,
2178 &ring->pg_tbl_map,
2179 GFP_KERNEL);
2180 if (!ring->pg_tbl)
2181 return -ENOMEM;
2182 }
2183
2184 for (i = 0; i < ring->nr_pages; i++) {
2185 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2186 ring->page_size,
2187 &ring->dma_arr[i],
2188 GFP_KERNEL);
2189 if (!ring->pg_arr[i])
2190 return -ENOMEM;
2191
2192 if (ring->nr_pages > 1)
2193 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2194 }
2195
2196 if (ring->vmem_size) {
2197 *ring->vmem = vzalloc(ring->vmem_size);
2198 if (!(*ring->vmem))
2199 return -ENOMEM;
2200 }
2201 return 0;
2202}
2203
2204static void bnxt_free_rx_rings(struct bnxt *bp)
2205{
2206 int i;
2207
Michael Chanb6ab4b02016-01-02 23:44:59 -05002208 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002209 return;
2210
2211 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002212 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002213 struct bnxt_ring_struct *ring;
2214
Michael Chanc6d30e82017-02-06 16:55:42 -05002215 if (rxr->xdp_prog)
2216 bpf_prog_put(rxr->xdp_prog);
2217
Michael Chanc0c050c2015-10-22 16:01:17 -04002218 kfree(rxr->rx_tpa);
2219 rxr->rx_tpa = NULL;
2220
2221 kfree(rxr->rx_agg_bmap);
2222 rxr->rx_agg_bmap = NULL;
2223
2224 ring = &rxr->rx_ring_struct;
2225 bnxt_free_ring(bp, ring);
2226
2227 ring = &rxr->rx_agg_ring_struct;
2228 bnxt_free_ring(bp, ring);
2229 }
2230}
2231
2232static int bnxt_alloc_rx_rings(struct bnxt *bp)
2233{
2234 int i, rc, agg_rings = 0, tpa_rings = 0;
2235
Michael Chanb6ab4b02016-01-02 23:44:59 -05002236 if (!bp->rx_ring)
2237 return -ENOMEM;
2238
Michael Chanc0c050c2015-10-22 16:01:17 -04002239 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2240 agg_rings = 1;
2241
2242 if (bp->flags & BNXT_FLAG_TPA)
2243 tpa_rings = 1;
2244
2245 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002246 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002247 struct bnxt_ring_struct *ring;
2248
Michael Chanc0c050c2015-10-22 16:01:17 -04002249 ring = &rxr->rx_ring_struct;
2250
2251 rc = bnxt_alloc_ring(bp, ring);
2252 if (rc)
2253 return rc;
2254
2255 if (agg_rings) {
2256 u16 mem_size;
2257
2258 ring = &rxr->rx_agg_ring_struct;
2259 rc = bnxt_alloc_ring(bp, ring);
2260 if (rc)
2261 return rc;
2262
2263 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2264 mem_size = rxr->rx_agg_bmap_size / 8;
2265 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2266 if (!rxr->rx_agg_bmap)
2267 return -ENOMEM;
2268
2269 if (tpa_rings) {
2270 rxr->rx_tpa = kcalloc(MAX_TPA,
2271 sizeof(struct bnxt_tpa_info),
2272 GFP_KERNEL);
2273 if (!rxr->rx_tpa)
2274 return -ENOMEM;
2275 }
2276 }
2277 }
2278 return 0;
2279}
2280
2281static void bnxt_free_tx_rings(struct bnxt *bp)
2282{
2283 int i;
2284 struct pci_dev *pdev = bp->pdev;
2285
Michael Chanb6ab4b02016-01-02 23:44:59 -05002286 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002287 return;
2288
2289 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002290 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002291 struct bnxt_ring_struct *ring;
2292
Michael Chanc0c050c2015-10-22 16:01:17 -04002293 if (txr->tx_push) {
2294 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2295 txr->tx_push, txr->tx_push_mapping);
2296 txr->tx_push = NULL;
2297 }
2298
2299 ring = &txr->tx_ring_struct;
2300
2301 bnxt_free_ring(bp, ring);
2302 }
2303}
2304
2305static int bnxt_alloc_tx_rings(struct bnxt *bp)
2306{
2307 int i, j, rc;
2308 struct pci_dev *pdev = bp->pdev;
2309
2310 bp->tx_push_size = 0;
2311 if (bp->tx_push_thresh) {
2312 int push_size;
2313
2314 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2315 bp->tx_push_thresh);
2316
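		/* The doorbell push appears to be limited to 256 bytes
		 * of BDs plus inline data; beyond that, fall back to
		 * normal DMA-based transmit only.
		 */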
Michael Chan4419dbe2016-02-10 17:33:49 -05002317 if (push_size > 256) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002318 push_size = 0;
2319 bp->tx_push_thresh = 0;
2320 }
2321
2322 bp->tx_push_size = push_size;
2323 }
2324
2325 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002326 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002327 struct bnxt_ring_struct *ring;
2328
Michael Chanc0c050c2015-10-22 16:01:17 -04002329 ring = &txr->tx_ring_struct;
2330
2331 rc = bnxt_alloc_ring(bp, ring);
2332 if (rc)
2333 return rc;
2334
2335 if (bp->tx_push_size) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002336 dma_addr_t mapping;
2337
 2338			/* One pre-allocated DMA buffer to back up
 2339			 * the TX push operation
2340 */
2341 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2342 bp->tx_push_size,
2343 &txr->tx_push_mapping,
2344 GFP_KERNEL);
2345
2346 if (!txr->tx_push)
2347 return -ENOMEM;
2348
Michael Chanc0c050c2015-10-22 16:01:17 -04002349 mapping = txr->tx_push_mapping +
2350 sizeof(struct tx_push_bd);
Michael Chan4419dbe2016-02-10 17:33:49 -05002351 txr->data_mapping = cpu_to_le64(mapping);
Michael Chanc0c050c2015-10-22 16:01:17 -04002352
Michael Chan4419dbe2016-02-10 17:33:49 -05002353 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
Michael Chanc0c050c2015-10-22 16:01:17 -04002354 }
2355 ring->queue_id = bp->q_info[j].queue_id;
Michael Chan5f449242017-02-06 16:55:40 -05002356 if (i < bp->tx_nr_rings_xdp)
2357 continue;
Michael Chanc0c050c2015-10-22 16:01:17 -04002358 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2359 j++;
2360 }
2361 return 0;
2362}
2363
2364static void bnxt_free_cp_rings(struct bnxt *bp)
2365{
2366 int i;
2367
2368 if (!bp->bnapi)
2369 return;
2370
2371 for (i = 0; i < bp->cp_nr_rings; i++) {
2372 struct bnxt_napi *bnapi = bp->bnapi[i];
2373 struct bnxt_cp_ring_info *cpr;
2374 struct bnxt_ring_struct *ring;
2375
2376 if (!bnapi)
2377 continue;
2378
2379 cpr = &bnapi->cp_ring;
2380 ring = &cpr->cp_ring_struct;
2381
2382 bnxt_free_ring(bp, ring);
2383 }
2384}
2385
2386static int bnxt_alloc_cp_rings(struct bnxt *bp)
2387{
2388 int i, rc;
2389
2390 for (i = 0; i < bp->cp_nr_rings; i++) {
2391 struct bnxt_napi *bnapi = bp->bnapi[i];
2392 struct bnxt_cp_ring_info *cpr;
2393 struct bnxt_ring_struct *ring;
2394
2395 if (!bnapi)
2396 continue;
2397
2398 cpr = &bnapi->cp_ring;
2399 ring = &cpr->cp_ring_struct;
2400
2401 rc = bnxt_alloc_ring(bp, ring);
2402 if (rc)
2403 return rc;
2404 }
2405 return 0;
2406}
2407
2408static void bnxt_init_ring_struct(struct bnxt *bp)
2409{
2410 int i;
2411
2412 for (i = 0; i < bp->cp_nr_rings; i++) {
2413 struct bnxt_napi *bnapi = bp->bnapi[i];
2414 struct bnxt_cp_ring_info *cpr;
2415 struct bnxt_rx_ring_info *rxr;
2416 struct bnxt_tx_ring_info *txr;
2417 struct bnxt_ring_struct *ring;
2418
2419 if (!bnapi)
2420 continue;
2421
2422 cpr = &bnapi->cp_ring;
2423 ring = &cpr->cp_ring_struct;
2424 ring->nr_pages = bp->cp_nr_pages;
2425 ring->page_size = HW_CMPD_RING_SIZE;
2426 ring->pg_arr = (void **)cpr->cp_desc_ring;
2427 ring->dma_arr = cpr->cp_desc_mapping;
2428 ring->vmem_size = 0;
2429
Michael Chanb6ab4b02016-01-02 23:44:59 -05002430 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002431 if (!rxr)
2432 goto skip_rx;
2433
Michael Chanc0c050c2015-10-22 16:01:17 -04002434 ring = &rxr->rx_ring_struct;
2435 ring->nr_pages = bp->rx_nr_pages;
2436 ring->page_size = HW_RXBD_RING_SIZE;
2437 ring->pg_arr = (void **)rxr->rx_desc_ring;
2438 ring->dma_arr = rxr->rx_desc_mapping;
2439 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2440 ring->vmem = (void **)&rxr->rx_buf_ring;
2441
2442 ring = &rxr->rx_agg_ring_struct;
2443 ring->nr_pages = bp->rx_agg_nr_pages;
2444 ring->page_size = HW_RXBD_RING_SIZE;
2445 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2446 ring->dma_arr = rxr->rx_agg_desc_mapping;
2447 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2448 ring->vmem = (void **)&rxr->rx_agg_ring;
2449
Michael Chan3b2b7d92016-01-02 23:45:00 -05002450skip_rx:
Michael Chanb6ab4b02016-01-02 23:44:59 -05002451 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002452 if (!txr)
2453 continue;
2454
Michael Chanc0c050c2015-10-22 16:01:17 -04002455 ring = &txr->tx_ring_struct;
2456 ring->nr_pages = bp->tx_nr_pages;
2457 ring->page_size = HW_RXBD_RING_SIZE;
2458 ring->pg_arr = (void **)txr->tx_desc_ring;
2459 ring->dma_arr = txr->tx_desc_mapping;
2460 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2461 ring->vmem = (void **)&txr->tx_buf_ring;
2462 }
2463}
2464
2465static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2466{
2467 int i;
2468 u32 prod;
2469 struct rx_bd **rx_buf_ring;
2470
2471 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2472 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2473 int j;
2474 struct rx_bd *rxbd;
2475
2476 rxbd = rx_buf_ring[i];
2477 if (!rxbd)
2478 continue;
2479
2480 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2481 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2482 rxbd->rx_bd_opaque = prod;
2483 }
2484 }
2485}
2486
2487static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2488{
2489 struct net_device *dev = bp->dev;
Michael Chanc0c050c2015-10-22 16:01:17 -04002490 struct bnxt_rx_ring_info *rxr;
2491 struct bnxt_ring_struct *ring;
2492 u32 prod, type;
2493 int i;
2494
Michael Chanc0c050c2015-10-22 16:01:17 -04002495 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2496 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2497
2498 if (NET_IP_ALIGN == 2)
2499 type |= RX_BD_FLAGS_SOP;
2500
Michael Chanb6ab4b02016-01-02 23:44:59 -05002501 rxr = &bp->rx_ring[ring_nr];
Michael Chanc0c050c2015-10-22 16:01:17 -04002502 ring = &rxr->rx_ring_struct;
2503 bnxt_init_rxbd_pages(ring, type);
2504
Michael Chanc6d30e82017-02-06 16:55:42 -05002505 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2506 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2507 if (IS_ERR(rxr->xdp_prog)) {
2508 int rc = PTR_ERR(rxr->xdp_prog);
2509
2510 rxr->xdp_prog = NULL;
2511 return rc;
2512 }
2513 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002514 prod = rxr->rx_prod;
2515 for (i = 0; i < bp->rx_ring_size; i++) {
2516 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2517 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2518 ring_nr, i, bp->rx_ring_size);
2519 break;
2520 }
2521 prod = NEXT_RX(prod);
2522 }
2523 rxr->rx_prod = prod;
2524 ring->fw_ring_id = INVALID_HW_RING_ID;
2525
Michael Chanedd0c2c2015-12-27 18:19:19 -05002526 ring = &rxr->rx_agg_ring_struct;
2527 ring->fw_ring_id = INVALID_HW_RING_ID;
2528
Michael Chanc0c050c2015-10-22 16:01:17 -04002529 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2530 return 0;
2531
Michael Chan2839f282016-04-25 02:30:50 -04002532 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
Michael Chanc0c050c2015-10-22 16:01:17 -04002533 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2534
2535 bnxt_init_rxbd_pages(ring, type);
2536
2537 prod = rxr->rx_agg_prod;
2538 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2539 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2540 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 2541				    ring_nr, i, bp->rx_agg_ring_size);
2542 break;
2543 }
2544 prod = NEXT_RX_AGG(prod);
2545 }
2546 rxr->rx_agg_prod = prod;
Michael Chanc0c050c2015-10-22 16:01:17 -04002547
2548 if (bp->flags & BNXT_FLAG_TPA) {
2549 if (rxr->rx_tpa) {
2550 u8 *data;
2551 dma_addr_t mapping;
2552
2553 for (i = 0; i < MAX_TPA; i++) {
2554 data = __bnxt_alloc_rx_data(bp, &mapping,
2555 GFP_KERNEL);
2556 if (!data)
2557 return -ENOMEM;
2558
2559 rxr->rx_tpa[i].data = data;
Michael Chanb3dba772017-02-06 16:55:35 -05002560 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
Michael Chanc0c050c2015-10-22 16:01:17 -04002561 rxr->rx_tpa[i].mapping = mapping;
2562 }
2563 } else {
2564 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2565 return -ENOMEM;
2566 }
2567 }
2568
2569 return 0;
2570}
2571
Sankar Patchineelam22479252017-03-28 19:47:29 -04002572static void bnxt_init_cp_rings(struct bnxt *bp)
2573{
2574 int i;
2575
2576 for (i = 0; i < bp->cp_nr_rings; i++) {
2577 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2578 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2579
2580 ring->fw_ring_id = INVALID_HW_RING_ID;
2581 }
2582}
2583
Michael Chanc0c050c2015-10-22 16:01:17 -04002584static int bnxt_init_rx_rings(struct bnxt *bp)
2585{
2586 int i, rc = 0;
2587
Michael Chanc61fb992017-02-06 16:55:36 -05002588 if (BNXT_RX_PAGE_MODE(bp)) {
Michael Chanc6d30e82017-02-06 16:55:42 -05002589 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2590 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
Michael Chanc61fb992017-02-06 16:55:36 -05002591 } else {
2592 bp->rx_offset = BNXT_RX_OFFSET;
2593 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2594 }
Michael Chanb3dba772017-02-06 16:55:35 -05002595
Michael Chanc0c050c2015-10-22 16:01:17 -04002596 for (i = 0; i < bp->rx_nr_rings; i++) {
2597 rc = bnxt_init_one_rx_ring(bp, i);
2598 if (rc)
2599 break;
2600 }
2601
2602 return rc;
2603}
2604
2605static int bnxt_init_tx_rings(struct bnxt *bp)
2606{
2607 u16 i;
2608
2609 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2610 MAX_SKB_FRAGS + 1);
2611
2612 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002613 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002614 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2615
2616 ring->fw_ring_id = INVALID_HW_RING_ID;
2617 }
2618
2619 return 0;
2620}
2621
2622static void bnxt_free_ring_grps(struct bnxt *bp)
2623{
2624 kfree(bp->grp_info);
2625 bp->grp_info = NULL;
2626}
2627
2628static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2629{
2630 int i;
2631
2632 if (irq_re_init) {
2633 bp->grp_info = kcalloc(bp->cp_nr_rings,
2634 sizeof(struct bnxt_ring_grp_info),
2635 GFP_KERNEL);
2636 if (!bp->grp_info)
2637 return -ENOMEM;
2638 }
2639 for (i = 0; i < bp->cp_nr_rings; i++) {
2640 if (irq_re_init)
2641 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2642 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2643 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2644 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2645 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2646 }
2647 return 0;
2648}
2649
2650static void bnxt_free_vnics(struct bnxt *bp)
2651{
2652 kfree(bp->vnic_info);
2653 bp->vnic_info = NULL;
2654 bp->nr_vnics = 0;
2655}
2656
2657static int bnxt_alloc_vnics(struct bnxt *bp)
2658{
2659 int num_vnics = 1;
2660
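	/* aRFS steers each filtered flow to its own rx ring through a
	 * dedicated VNIC, so reserve one extra VNIC per rx ring.
	 */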
2661#ifdef CONFIG_RFS_ACCEL
2662 if (bp->flags & BNXT_FLAG_RFS)
2663 num_vnics += bp->rx_nr_rings;
2664#endif
2665
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04002666 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2667 num_vnics++;
2668
Michael Chanc0c050c2015-10-22 16:01:17 -04002669 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2670 GFP_KERNEL);
2671 if (!bp->vnic_info)
2672 return -ENOMEM;
2673
2674 bp->nr_vnics = num_vnics;
2675 return 0;
2676}
2677
2678static void bnxt_init_vnics(struct bnxt *bp)
2679{
2680 int i;
2681
2682 for (i = 0; i < bp->nr_vnics; i++) {
2683 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2684
2685 vnic->fw_vnic_id = INVALID_HW_RING_ID;
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04002686 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2687 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04002688 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2689
2690 if (bp->vnic_info[i].rss_hash_key) {
2691 if (i == 0)
2692 prandom_bytes(vnic->rss_hash_key,
2693 HW_HASH_KEY_SIZE);
2694 else
2695 memcpy(vnic->rss_hash_key,
2696 bp->vnic_info[0].rss_hash_key,
2697 HW_HASH_KEY_SIZE);
2698 }
2699 }
2700}
2701
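/* Return the number of ring pages needed for ring_size descriptors,
 * rounded up to a power of two so the ring masks stay valid; e.g.
 * 2000 descriptors at 256 per page give 2000/256 = 7, bumped to 8.
 */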
2702static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2703{
2704 int pages;
2705
2706 pages = ring_size / desc_per_pg;
2707
2708 if (!pages)
2709 return 1;
2710
2711 pages++;
2712
2713 while (pages & (pages - 1))
2714 pages++;
2715
2716 return pages;
2717}
2718
Michael Chanc6d30e82017-02-06 16:55:42 -05002719void bnxt_set_tpa_flags(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04002720{
2721 bp->flags &= ~BNXT_FLAG_TPA;
Michael Chan341138c2017-01-13 01:32:01 -05002722 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2723 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04002724 if (bp->dev->features & NETIF_F_LRO)
2725 bp->flags |= BNXT_FLAG_LRO;
Michael Chan94758f82016-06-13 02:25:35 -04002726 if (bp->dev->features & NETIF_F_GRO)
Michael Chanc0c050c2015-10-22 16:01:17 -04002727 bp->flags |= BNXT_FLAG_GRO;
2728}
2729
2730/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2731 * be set on entry.
2732 */
2733void bnxt_set_ring_params(struct bnxt *bp)
2734{
2735 u32 ring_size, rx_size, rx_space;
2736 u32 agg_factor = 0, agg_ring_size = 0;
2737
2738 /* 8 for CRC and VLAN */
2739 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2740
2741 rx_space = rx_size + NET_SKB_PAD +
2742 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2743
2744 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2745 ring_size = bp->rx_ring_size;
2746 bp->rx_agg_ring_size = 0;
2747 bp->rx_agg_nr_pages = 0;
2748
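	/* A TPA aggregate can be up to 64KB; allow up to 4 agg buffers
	 * per rx descriptor, or fewer when the agg page size already
	 * covers 64KB in fewer pages.
	 */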
2749 if (bp->flags & BNXT_FLAG_TPA)
Michael Chan2839f282016-04-25 02:30:50 -04002750 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
Michael Chanc0c050c2015-10-22 16:01:17 -04002751
2752 bp->flags &= ~BNXT_FLAG_JUMBO;
Michael Chanbdbd1eb2016-12-29 12:13:43 -05002753 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002754 u32 jumbo_factor;
2755
2756 bp->flags |= BNXT_FLAG_JUMBO;
2757 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2758 if (jumbo_factor > agg_factor)
2759 agg_factor = jumbo_factor;
2760 }
2761 agg_ring_size = ring_size * agg_factor;
2762
2763 if (agg_ring_size) {
2764 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2765 RX_DESC_CNT);
2766 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2767 u32 tmp = agg_ring_size;
2768
2769 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2770 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2771 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2772 tmp, agg_ring_size);
2773 }
2774 bp->rx_agg_ring_size = agg_ring_size;
2775 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2776 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2777 rx_space = rx_size + NET_SKB_PAD +
2778 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2779 }
2780
2781 bp->rx_buf_use_size = rx_size;
2782 bp->rx_buf_size = rx_space;
2783
2784 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2785 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2786
2787 ring_size = bp->tx_ring_size;
2788 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2789 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2790
2791 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2792 bp->cp_ring_size = ring_size;
2793
2794 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2795 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2796 bp->cp_nr_pages = MAX_CP_PAGES;
2797 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2798 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2799 ring_size, bp->cp_ring_size);
2800 }
2801 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2802 bp->cp_ring_mask = bp->cp_bit - 1;
2803}
2804
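/* In page mode (used by XDP) every rx buffer is a full page mapped
 * DMA_BIDIRECTIONAL so the BPF program can rewrite and transmit it in
 * place; LRO and the agg rings are disabled because XDP operates on
 * single, MTU-limited buffers.
 */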
Michael Chanc61fb992017-02-06 16:55:36 -05002805int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
Michael Chan6bb19472017-02-06 16:55:32 -05002806{
Michael Chanc61fb992017-02-06 16:55:36 -05002807 if (page_mode) {
2808 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2809 return -EOPNOTSUPP;
2810 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2811 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2812 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2813 bp->dev->hw_features &= ~NETIF_F_LRO;
2814 bp->dev->features &= ~NETIF_F_LRO;
2815 bp->rx_dir = DMA_BIDIRECTIONAL;
2816 bp->rx_skb_func = bnxt_rx_page_skb;
2817 } else {
2818 bp->dev->max_mtu = BNXT_MAX_MTU;
2819 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2820 bp->rx_dir = DMA_FROM_DEVICE;
2821 bp->rx_skb_func = bnxt_rx_skb;
2822 }
Michael Chan6bb19472017-02-06 16:55:32 -05002823 return 0;
2824}
2825
Michael Chanc0c050c2015-10-22 16:01:17 -04002826static void bnxt_free_vnic_attributes(struct bnxt *bp)
2827{
2828 int i;
2829 struct bnxt_vnic_info *vnic;
2830 struct pci_dev *pdev = bp->pdev;
2831
2832 if (!bp->vnic_info)
2833 return;
2834
2835 for (i = 0; i < bp->nr_vnics; i++) {
2836 vnic = &bp->vnic_info[i];
2837
2838 kfree(vnic->fw_grp_ids);
2839 vnic->fw_grp_ids = NULL;
2840
2841 kfree(vnic->uc_list);
2842 vnic->uc_list = NULL;
2843
2844 if (vnic->mc_list) {
2845 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2846 vnic->mc_list, vnic->mc_list_mapping);
2847 vnic->mc_list = NULL;
2848 }
2849
2850 if (vnic->rss_table) {
2851 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2852 vnic->rss_table,
2853 vnic->rss_table_dma_addr);
2854 vnic->rss_table = NULL;
2855 }
2856
2857 vnic->rss_hash_key = NULL;
2858 vnic->flags = 0;
2859 }
2860}
2861
2862static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2863{
2864 int i, rc = 0, size;
2865 struct bnxt_vnic_info *vnic;
2866 struct pci_dev *pdev = bp->pdev;
2867 int max_rings;
2868
2869 for (i = 0; i < bp->nr_vnics; i++) {
2870 vnic = &bp->vnic_info[i];
2871
2872 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2873 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2874
2875 if (mem_size > 0) {
2876 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2877 if (!vnic->uc_list) {
2878 rc = -ENOMEM;
2879 goto out;
2880 }
2881 }
2882 }
2883
2884 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2885 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2886 vnic->mc_list =
2887 dma_alloc_coherent(&pdev->dev,
2888 vnic->mc_list_size,
2889 &vnic->mc_list_mapping,
2890 GFP_KERNEL);
2891 if (!vnic->mc_list) {
2892 rc = -ENOMEM;
2893 goto out;
2894 }
2895 }
2896
2897 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2898 max_rings = bp->rx_nr_rings;
2899 else
2900 max_rings = 1;
2901
2902 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2903 if (!vnic->fw_grp_ids) {
2904 rc = -ENOMEM;
2905 goto out;
2906 }
2907
Michael Chanae10ae72016-12-29 12:13:38 -05002908 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2909 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2910 continue;
2911
Michael Chanc0c050c2015-10-22 16:01:17 -04002912 /* Allocate rss table and hash key */
2913 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2914 &vnic->rss_table_dma_addr,
2915 GFP_KERNEL);
2916 if (!vnic->rss_table) {
2917 rc = -ENOMEM;
2918 goto out;
2919 }
2920
2921 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2922
2923 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2924 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2925 }
2926 return 0;
2927
2928out:
2929 return rc;
2930}
2931
2932static void bnxt_free_hwrm_resources(struct bnxt *bp)
2933{
2934 struct pci_dev *pdev = bp->pdev;
2935
2936 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2937 bp->hwrm_cmd_resp_dma_addr);
2938
2939 bp->hwrm_cmd_resp_addr = NULL;
2940 if (bp->hwrm_dbg_resp_addr) {
2941 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2942 bp->hwrm_dbg_resp_addr,
2943 bp->hwrm_dbg_resp_dma_addr);
2944
2945 bp->hwrm_dbg_resp_addr = NULL;
2946 }
2947}
2948
2949static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2950{
2951 struct pci_dev *pdev = bp->pdev;
2952
2953 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2954 &bp->hwrm_cmd_resp_dma_addr,
2955 GFP_KERNEL);
2956 if (!bp->hwrm_cmd_resp_addr)
2957 return -ENOMEM;
2958 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2959 HWRM_DBG_REG_BUF_SIZE,
2960 &bp->hwrm_dbg_resp_dma_addr,
2961 GFP_KERNEL);
2962 if (!bp->hwrm_dbg_resp_addr)
 2963		netdev_warn(bp->dev, "failed to alloc debug register dma mem\n");
2964
2965 return 0;
2966}
2967
Deepak Khungare605db82017-05-29 19:06:04 -04002968static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
2969{
2970 if (bp->hwrm_short_cmd_req_addr) {
2971 struct pci_dev *pdev = bp->pdev;
2972
2973 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2974 bp->hwrm_short_cmd_req_addr,
2975 bp->hwrm_short_cmd_req_dma_addr);
2976 bp->hwrm_short_cmd_req_addr = NULL;
2977 }
2978}
2979
2980static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
2981{
2982 struct pci_dev *pdev = bp->pdev;
2983
2984 bp->hwrm_short_cmd_req_addr =
2985 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2986 &bp->hwrm_short_cmd_req_dma_addr,
2987 GFP_KERNEL);
2988 if (!bp->hwrm_short_cmd_req_addr)
2989 return -ENOMEM;
2990
2991 return 0;
2992}
2993
Michael Chanc0c050c2015-10-22 16:01:17 -04002994static void bnxt_free_stats(struct bnxt *bp)
2995{
2996 u32 size, i;
2997 struct pci_dev *pdev = bp->pdev;
2998
Michael Chan3bdf56c2016-03-07 15:38:45 -05002999 if (bp->hw_rx_port_stats) {
3000 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3001 bp->hw_rx_port_stats,
3002 bp->hw_rx_port_stats_map);
3003 bp->hw_rx_port_stats = NULL;
3004 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3005 }
3006
Michael Chanc0c050c2015-10-22 16:01:17 -04003007 if (!bp->bnapi)
3008 return;
3009
3010 size = sizeof(struct ctx_hw_stats);
3011
3012 for (i = 0; i < bp->cp_nr_rings; i++) {
3013 struct bnxt_napi *bnapi = bp->bnapi[i];
3014 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3015
3016 if (cpr->hw_stats) {
3017 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3018 cpr->hw_stats_map);
3019 cpr->hw_stats = NULL;
3020 }
3021 }
3022}
3023
3024static int bnxt_alloc_stats(struct bnxt *bp)
3025{
3026 u32 size, i;
3027 struct pci_dev *pdev = bp->pdev;
3028
3029 size = sizeof(struct ctx_hw_stats);
3030
3031 for (i = 0; i < bp->cp_nr_rings; i++) {
3032 struct bnxt_napi *bnapi = bp->bnapi[i];
3033 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3034
3035 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3036 &cpr->hw_stats_map,
3037 GFP_KERNEL);
3038 if (!cpr->hw_stats)
3039 return -ENOMEM;
3040
3041 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3042 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05003043
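	/* Firmware DMAs the port statistics as a single block: rx stats
	 * followed by tx stats, each apparently padded to a 512-byte
	 * boundary (hence the extra 1024 bytes and the +512 offsets).
	 */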
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04003044 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05003045 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3046 sizeof(struct tx_port_stats) + 1024;
3047
3048 bp->hw_rx_port_stats =
3049 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3050 &bp->hw_rx_port_stats_map,
3051 GFP_KERNEL);
3052 if (!bp->hw_rx_port_stats)
3053 return -ENOMEM;
3054
3055 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3056 512;
3057 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3058 sizeof(struct rx_port_stats) + 512;
3059 bp->flags |= BNXT_FLAG_PORT_STATS;
3060 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003061 return 0;
3062}
3063
3064static void bnxt_clear_ring_indices(struct bnxt *bp)
3065{
3066 int i;
3067
3068 if (!bp->bnapi)
3069 return;
3070
3071 for (i = 0; i < bp->cp_nr_rings; i++) {
3072 struct bnxt_napi *bnapi = bp->bnapi[i];
3073 struct bnxt_cp_ring_info *cpr;
3074 struct bnxt_rx_ring_info *rxr;
3075 struct bnxt_tx_ring_info *txr;
3076
3077 if (!bnapi)
3078 continue;
3079
3080 cpr = &bnapi->cp_ring;
3081 cpr->cp_raw_cons = 0;
3082
Michael Chanb6ab4b02016-01-02 23:44:59 -05003083 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003084 if (txr) {
3085 txr->tx_prod = 0;
3086 txr->tx_cons = 0;
3087 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003088
Michael Chanb6ab4b02016-01-02 23:44:59 -05003089 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003090 if (rxr) {
3091 rxr->rx_prod = 0;
3092 rxr->rx_agg_prod = 0;
3093 rxr->rx_sw_agg_prod = 0;
Michael Chan376a5b82016-05-10 19:17:59 -04003094 rxr->rx_next_cons = 0;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003095 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003096 }
3097}
3098
3099static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3100{
3101#ifdef CONFIG_RFS_ACCEL
3102 int i;
3103
 3104	/* We are under rtnl_lock and all our NAPIs have been disabled,
 3105	 * so it is safe to delete the hash table.
3106 */
3107 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3108 struct hlist_head *head;
3109 struct hlist_node *tmp;
3110 struct bnxt_ntuple_filter *fltr;
3111
3112 head = &bp->ntp_fltr_hash_tbl[i];
3113 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3114 hlist_del(&fltr->hash);
3115 kfree(fltr);
3116 }
3117 }
3118 if (irq_reinit) {
3119 kfree(bp->ntp_fltr_bmap);
3120 bp->ntp_fltr_bmap = NULL;
3121 }
3122 bp->ntp_fltr_count = 0;
3123#endif
3124}
3125
3126static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3127{
3128#ifdef CONFIG_RFS_ACCEL
3129 int i, rc = 0;
3130
3131 if (!(bp->flags & BNXT_FLAG_RFS))
3132 return 0;
3133
3134 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3135 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3136
3137 bp->ntp_fltr_count = 0;
Dan Carpenterac45bd92017-05-06 03:49:01 +03003138 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3139 sizeof(long),
Michael Chanc0c050c2015-10-22 16:01:17 -04003140 GFP_KERNEL);
3141
3142 if (!bp->ntp_fltr_bmap)
3143 rc = -ENOMEM;
3144
3145 return rc;
3146#else
3147 return 0;
3148#endif
3149}
3150
3151static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3152{
3153 bnxt_free_vnic_attributes(bp);
3154 bnxt_free_tx_rings(bp);
3155 bnxt_free_rx_rings(bp);
3156 bnxt_free_cp_rings(bp);
3157 bnxt_free_ntp_fltrs(bp, irq_re_init);
3158 if (irq_re_init) {
3159 bnxt_free_stats(bp);
3160 bnxt_free_ring_grps(bp);
3161 bnxt_free_vnics(bp);
Michael Chana960dec2017-02-06 16:55:39 -05003162 kfree(bp->tx_ring_map);
3163 bp->tx_ring_map = NULL;
Michael Chanb6ab4b02016-01-02 23:44:59 -05003164 kfree(bp->tx_ring);
3165 bp->tx_ring = NULL;
3166 kfree(bp->rx_ring);
3167 bp->rx_ring = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04003168 kfree(bp->bnapi);
3169 bp->bnapi = NULL;
3170 } else {
3171 bnxt_clear_ring_indices(bp);
3172 }
3173}
3174
3175static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3176{
Michael Chan01657bc2016-01-02 23:45:03 -05003177 int i, j, rc, size, arr_size;
Michael Chanc0c050c2015-10-22 16:01:17 -04003178 void *bnapi;
3179
3180 if (irq_re_init) {
3181 /* Allocate bnapi mem pointer array and mem block for
3182 * all queues
3183 */
3184 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3185 bp->cp_nr_rings);
3186 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3187 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3188 if (!bnapi)
3189 return -ENOMEM;
3190
3191 bp->bnapi = bnapi;
3192 bnapi += arr_size;
3193 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3194 bp->bnapi[i] = bnapi;
3195 bp->bnapi[i]->index = i;
3196 bp->bnapi[i]->bp = bp;
3197 }
3198
Michael Chanb6ab4b02016-01-02 23:44:59 -05003199 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3200 sizeof(struct bnxt_rx_ring_info),
3201 GFP_KERNEL);
3202 if (!bp->rx_ring)
3203 return -ENOMEM;
3204
3205 for (i = 0; i < bp->rx_nr_rings; i++) {
3206 bp->rx_ring[i].bnapi = bp->bnapi[i];
3207 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3208 }
3209
3210 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3211 sizeof(struct bnxt_tx_ring_info),
3212 GFP_KERNEL);
3213 if (!bp->tx_ring)
3214 return -ENOMEM;
3215
Michael Chana960dec2017-02-06 16:55:39 -05003216 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3217 GFP_KERNEL);
3218
3219 if (!bp->tx_ring_map)
3220 return -ENOMEM;
3221
Michael Chan01657bc2016-01-02 23:45:03 -05003222 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3223 j = 0;
3224 else
3225 j = bp->rx_nr_rings;
3226
3227 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3228 bp->tx_ring[i].bnapi = bp->bnapi[j];
3229 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
Michael Chan5f449242017-02-06 16:55:40 -05003230 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
Michael Chan38413402017-02-06 16:55:43 -05003231 if (i >= bp->tx_nr_rings_xdp) {
Michael Chan5f449242017-02-06 16:55:40 -05003232 bp->tx_ring[i].txq_index = i -
3233 bp->tx_nr_rings_xdp;
Michael Chan38413402017-02-06 16:55:43 -05003234 bp->bnapi[j]->tx_int = bnxt_tx_int;
3235 } else {
Michael Chanfa3e93e2017-02-06 16:55:41 -05003236 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
Michael Chan38413402017-02-06 16:55:43 -05003237 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3238 }
Michael Chanb6ab4b02016-01-02 23:44:59 -05003239 }
3240
Michael Chanc0c050c2015-10-22 16:01:17 -04003241 rc = bnxt_alloc_stats(bp);
3242 if (rc)
3243 goto alloc_mem_err;
3244
3245 rc = bnxt_alloc_ntp_fltrs(bp);
3246 if (rc)
3247 goto alloc_mem_err;
3248
3249 rc = bnxt_alloc_vnics(bp);
3250 if (rc)
3251 goto alloc_mem_err;
3252 }
3253
3254 bnxt_init_ring_struct(bp);
3255
3256 rc = bnxt_alloc_rx_rings(bp);
3257 if (rc)
3258 goto alloc_mem_err;
3259
3260 rc = bnxt_alloc_tx_rings(bp);
3261 if (rc)
3262 goto alloc_mem_err;
3263
3264 rc = bnxt_alloc_cp_rings(bp);
3265 if (rc)
3266 goto alloc_mem_err;
3267
3268 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3269 BNXT_VNIC_UCAST_FLAG;
3270 rc = bnxt_alloc_vnic_attributes(bp);
3271 if (rc)
3272 goto alloc_mem_err;
3273 return 0;
3274
3275alloc_mem_err:
3276 bnxt_free_mem(bp, true);
3277 return rc;
3278}
3279
static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

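/* Mask all completion ring interrupts and wait for in-flight handlers
 * to finish.  intr_sem is raised first so the interrupt paths that
 * test it back off while each vector is synchronized.
 */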
static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}

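/* Send one HWRM request to the firmware and wait for its completion.
 * The request is copied into BAR0 (or wrapped in a hwrm_short_input
 * pointing at a DMA buffer when BNXT_FLAG_SHORT_CMD is set) and the
 * channel doorbell at BAR0 + 0x100 is rung.  Completion is detected
 * either via the hwrm completion interrupt (when cmpl_ring is valid)
 * or by polling the response length and then the valid bit in the
 * response buffer.  Only one request may be outstanding at a time,
 * which is why callers serialize on bp->hwrm_cmd_lock.
 */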
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
		struct hwrm_short_input short_input = {0};

		memcpy(short_cmd_req, req, msg_len);
		memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
						   msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr =
			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	tmo_count = timeout * 40;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			usleep_range(25, 40);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(25, 40);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < 5; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			udelay(1);
		}

		if (i >= 5) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}

int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

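/* Register the async events the driver wants firmware to forward.  The
 * events in bnxt_async_events_arr[] are always requested; callers can
 * OR in extra events through the optional bmap/bmap_size pair.  The
 * 256-bit bitmap is copied into req.async_event_fwd[] as eight 32-bit
 * words.
 */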
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
				     int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE

static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

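/* Configure TPA (hardware receive aggregation) for one vnic.  The
 * maximum aggregation size is computed from the MTU-derived MSS and
 * the per-skb frag budget (MAX_SKB_FRAGS), then passed to firmware in
 * log2 units; the first packet of an aggregation does not count
 * against that limit.
 */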
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of segs is in log2 units, and the first
		 * packet is not included in these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

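/* Program the RSS hash type, hash key and indirection table for one
 * vnic.  The indirection table is filled round-robin with the fw ring
 * group ids of the rx rings that belong to this vnic.
 */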
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS is supported for now; TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |=
			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

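/* Build and send one HWRM_RING_ALLOC request.  The same message is
 * used for tx, rx, rx-agg and completion rings; ring_type selects the
 * type-specific fields (ring length, associated completion ring, stats
 * context, queue id).  On success the firmware ring id is stored in
 * ring->fw_ring_id.
 */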
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}

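/* Allocate all firmware rings for the current configuration, in
 * dependency order: completion rings first (ring 0 also becomes the
 * async event ring), then tx, rx and rx-agg rings.  Doorbell addresses
 * are derived from the map index at BAR1 + index * 0x80.
 */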
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}

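/* Free all firmware rings in the reverse of the allocation order:
 * tx, rx and rx-agg rings first, then the completion rings.  IRQs are
 * disabled and synchronized before the completion rings go away since
 * the IRQ doorbells stop working at that point.  When close_path is
 * true, each free request is targeted at the ring's associated
 * completion ring instead of the default channel.
 */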
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 grp_idx = txr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_L2_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

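/* Reserve *tx_rings tx rings with firmware via HWRM_FUNC_CFG, then
 * read back the number actually granted so the caller sees the real
 * allocation.  Only meaningful on the PF with HWRM spec 1.6.1 or
 * later; earlier firmware and VFs return success without reserving.
 */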
static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	if (BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req.num_tx_rings = cpu_to_le16(*tx_rings);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc)
		bp->tx_reserved_rings = *tx_rings;
	return rc;
}

static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req.num_tx_rings = cpu_to_le16(tx_rings);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

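/* Pack interrupt coalescing parameters into one aggint_params request.
 * The low 16 bits of max_bufs/buf_tmrs apply outside interrupt
 * processing and the high 16 bits apply during interrupt processing;
 * callers pass timer values already converted with
 * BNXT_USEC_TO_COAL_TIMER().
 */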
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
				      u32 buf_tmrs, u16 flags,
				      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = cpu_to_le16(flags);
	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	u16 max_buf, max_buf_irq;
	u16 buf_tmr, buf_tmr_irq;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(u16, max_buf, 1, 63);
	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
	 * if coal_ticks is less than 25 us.
	 */
	if (bp->rx_coal_ticks < 25)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

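/* Allocate one firmware statistics context per completion ring and
 * point it at the ring's statistics DMA buffer.
 */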
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

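/* Query the current function configuration: VF VLAN, firmware LLDP/DCBX
 * agent flags, multi-host mode, NPAR partition type and bridge (EVB) mode.
 */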
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

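/* Query function capabilities and record the resource maximums (rings,
 * ring groups, VNICs, stat contexts, VFs, flows) in the PF or VF info
 * structure.  For a VF, also set the netdev MAC address from the
 * admin-assigned MAC, or a random one if none is assigned.
 */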
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;

	bp->tx_push_thresh = 0;
	if (resp->flags &
	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		if (resp->flags &
		    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);

		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
		mutex_unlock(&bp->hwrm_cmd_lock);

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			eth_hw_addr_random(bp->dev);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
		return rc;
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

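/* Query firmware and HWRM interface versions.  Sets hwrm_spec_code, the
 * command timeout, the max request length and chip identification flags.
 */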
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u32 dev_caps_cfg;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
	if (resp->hwrm_intf_maj < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
			    resp->hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj >= 1)
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);

	bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->flags |= BNXT_FLAG_SHORT_CMD;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
#if IS_ENABLED(CONFIG_RTC_LIB)
	struct hwrm_fw_set_time_input req = {0};
	struct rtc_time tm;
	struct timeval tv;

	if (bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	do_gettimeofday(&tv);
	rtc_time_to_tm(tv.tv_sec, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* before freeing the vnic, undo the vnic tpa settings */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

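/* Map the requested Linux bridge mode to the firmware EVB mode and
 * apply it with HWRM_FUNC_CFG.
 */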
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

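/* Fully set up one vnic: allocate RSS contexts (unless reusing an RFS
 * context), configure the vnic and its ring group, enable RSS hashing
 * and, when aggregation rings are used, header-data split.
 */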
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

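/* For aRFS, allocate and set up one vnic per RX ring (vnic 1..n), each
 * backed by a single ring group.
 */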
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}

static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	unsigned int rc = 0;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}

static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

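/* Program the chip for operation: stat contexts, rings and ring groups,
 * vnics, RSS/TPA, the default vnic 0 L2 filter and RX mask, interrupt
 * coalescing and, for VFs, the firmware-assigned MAC.  On failure all
 * HWRM resources are freed again.
 */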
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
		if (bp->tx_reserved_rings != bp->tx_nr_rings) {
			int tx = bp->tx_nr_rings;

			if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
			    tx < bp->tx_nr_rings) {
				rc = -ENOMEM;
				goto err_out;
			}
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_cp_rings(bp);
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}

static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}

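/* Fit *rx and *tx into at most max completion rings.  With shared
 * rings each count is simply capped at max; otherwise the larger count
 * is decremented until rx + tx fits.
 */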
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}

static void bnxt_setup_msix(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);
	struct net_device *dev = bp->dev;
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
			count = bp->tx_nr_rings_per_tc;
			off = i * count;
			netdev_set_tc_queue(dev, i, count, off);
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
			 i);
		bp->irq_tbl[i].handler = bnxt_msix;
	}
}

static void bnxt_setup_inta(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);

	if (netdev_get_num_tc(bp->dev))
		netdev_reset_tc(bp->dev);

	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
	bp->irq_tbl[0].handler = bnxt_inta;
}

static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (bp->flags & BNXT_FLAG_USING_MSIX)
		bnxt_setup_msix(bp);
	else
		bnxt_setup_inta(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		return bp->vf.max_rsscos_ctxs;
#endif
	return bp->pf.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		return bp->vf.max_vnics;
#endif
	return bp->pf.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		return bp->vf.max_stat_ctxs;
#endif
	return bp->pf.max_stat_ctxs;
}

void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		bp->vf.max_stat_ctxs = max;
	else
#endif
		bp->pf.max_stat_ctxs = max;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		return bp->vf.max_cp_rings;
#endif
	return bp->pf.max_cp_rings;
}

void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		bp->vf.max_cp_rings = max;
	else
#endif
		bp->pf.max_cp_rings = max;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		return min_t(unsigned int, bp->vf.max_irqs,
			     bp->vf.max_cp_rings);
#endif
	return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
}

void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
#if defined(CONFIG_BNXT_SRIOV)
	if (BNXT_VF(bp))
		bp->vf.max_irqs = max_irqs;
	else
#endif
		bp->pf.max_irqs = max_irqs;
}

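/* Enable MSI-X with up to the number of vectors the function may use
 * (at least two when RX and TX rings are not shared) and trim the ring
 * counts to the vectors actually granted.
 */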
static int bnxt_init_msix(struct bnxt *bp)
{
	int i, total_vecs, rc = 0, min = 1;
	struct msix_entry *msix_ent;

	total_vecs = bnxt_get_max_func_irqs(bp);
	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
	if (!msix_ent)
		return -ENOMEM;

	for (i = 0; i < total_vecs; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		min = 2;

	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
	if (total_vecs < 0) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}

	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (bp->irq_tbl) {
		for (i = 0; i < total_vecs; i++)
			bp->irq_tbl[i].vector = msix_ent[i].vector;

		bp->total_irqs = total_vecs;
		/* Trim rings based upon num of vectors allocated */
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs, min == 1);
		if (rc)
			goto msix_setup_exit;

		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
		bp->cp_nr_rings = (min == 1) ?
				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
				  bp->tx_nr_rings + bp->rx_nr_rings;

	} else {
		rc = -ENOMEM;
		goto msix_setup_exit;
	}
	bp->flags |= BNXT_FLAG_USING_MSIX;
	kfree(msix_ent);
	return 0;

msix_setup_exit:
	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_disable_msix(bp->pdev);
	kfree(msix_ent);
	return rc;
}

static int bnxt_init_inta(struct bnxt *bp)
{
	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (!bp->irq_tbl)
		return -ENOMEM;

	bp->total_irqs = 1;
	bp->rx_nr_rings = 1;
	bp->tx_nr_rings = 1;
	bp->cp_nr_rings = 1;
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	bp->flags |= BNXT_FLAG_SHARED_RINGS;
	bp->irq_tbl[0].vector = bp->pdev->irq;
	return 0;
}

static int bnxt_init_int_mode(struct bnxt *bp)
{
	int rc = 0;

	if (bp->flags & BNXT_FLAG_MSIX_CAP)
		rc = bnxt_init_msix(bp);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
		/* fallback to INTA */
		rc = bnxt_init_inta(bp);
	}
	return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	bp->flags &= ~BNXT_FLAG_USING_MSIX;
}

static void bnxt_free_irq(struct bnxt *bp)
{
	struct bnxt_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
	bp->dev->rx_cpu_rmap = NULL;
#endif
	if (!bp->irq_tbl)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bp->bnapi[i]);
		}

		irq->requested = 0;
	}
}

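/* Request one IRQ per completion ring, add RX ring vectors to the aRFS
 * CPU rmap when enabled, and spread IRQ affinity hints across CPUs
 * close to the device's NUMA node.
 */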
static int bnxt_request_irq(struct bnxt *bp)
{
	int i, j, rc = 0;
	unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
#endif

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		flags = IRQF_SHARED;

	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_irq *irq = &bp->irq_tbl[i];
#ifdef CONFIG_RFS_ACCEL
		if (rmap && bp->bnapi[i]->rx_ring) {
			rc = irq_cpu_rmap_add(rmap, irq->vector);
			if (rc)
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
			j++;
		}
#endif
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
		if (rc)
			break;

		irq->requested = 1;

		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bp->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				break;
			}
		}
	}
	return rc;
}

static void bnxt_del_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		napi_hash_del(&bnapi->napi);
		netif_napi_del(&bnapi->napi);
	}
	/* We called napi_hash_del() before netif_napi_del(), we need
	 * to respect an RCU grace period before freeing napi structures.
	 */
	synchronize_net();
}

static void bnxt_init_napi(struct bnxt *bp)
{
	int i;
	unsigned int cp_nr_rings = bp->cp_nr_rings;
	struct bnxt_napi *bnapi;

	if (bp->flags & BNXT_FLAG_USING_MSIX) {
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			cp_nr_rings--;
		for (i = 0; i < cp_nr_rings; i++) {
			bnapi = bp->bnapi[i];
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll, 64);
		}
		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			bnapi = bp->bnapi[cp_nr_rings];
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll_nitroa0, 64);
		}
	} else {
		bnapi = bp->bnapi[0];
		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
	}
}

static void bnxt_disable_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++)
		napi_disable(&bp->bnapi[i]->napi);
}

static void bnxt_enable_napi(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		bp->bnapi[i]->in_reset = false;
		napi_enable(&bp->bnapi[i]->napi);
	}
}

void bnxt_tx_disable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	if (bp->tx_ring) {
		for (i = 0; i < bp->tx_nr_rings; i++) {
			txr = &bp->tx_ring[i];
			txr->dev_state = BNXT_DEV_STATE_CLOSING;
		}
	}
	/* Stop all TX queues */
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

void bnxt_tx_enable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		txr = &bp->tx_ring[i];
		txr->dev_state = 0;
	}
	netif_tx_wake_all_queues(bp->dev);
	if (bp->link_info.link_up)
		netif_carrier_on(bp->dev);
}

static void bnxt_report_link(struct bnxt *bp)
{
	if (bp->link_info.link_up) {
		const char *duplex;
		const char *flow_ctrl;
		u32 speed;
		u16 fec;

		netif_carrier_on(bp->dev);
		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
			flow_ctrl = "ON - receive & transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
			flow_ctrl = "ON - transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
			flow_ctrl = "ON - receive";
		else
			flow_ctrl = "none";
		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
			    speed, duplex, flow_ctrl);
		if (bp->flags & BNXT_FLAG_EEE_CAP)
			netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
				    "not active");
		fec = bp->link_info.fec_cfg;
		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
				    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_phy_qcaps_input req = {0};
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (bp->hwrm_spec_code < 0x10201)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_phy_qcaps_exit;

	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);

		bp->flags |= BNXT_FLAG_EEE_CAP;
		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	}
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			le16_to_cpu(resp->supported_speeds_auto_mode);

hwrm_phy_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

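/* Refresh bp->link_info from HWRM_PORT_PHY_QCFG and optionally update
 * the cached link state, reporting any link change.  If an advertised
 * speed is no longer supported, the advertisement mask is rewritten and
 * autoneg is restarted.
 */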
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 link_up = link_info->link_up;
	u16 diff;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	diff = link_info->support_auto_speeds ^ link_info->advertising;
	if ((link_info->support_auto_speeds | diff) !=
	    link_info->support_auto_speeds) {
		/* An advertised speed is no longer supported, so we need to
		 * update the advertisement settings.  Caller holds RTNL
		 * so we can modify link settings.
		 */
		link_info->advertising = link_info->support_auto_speeds;
		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
			bnxt_hwrm_set_link_setting(bp, true, false);
	}
	return 0;
}

static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

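/* Fill in the pause/flow-control fields of a PORT_PHY_CFG request from
 * the current autoneg and requested flow-control settings.
 */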
5946
Michael Chanc0c050c2015-10-22 16:01:17 -04005947static void
5948bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5949{
5950 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04005951 if (bp->hwrm_spec_code >= 0x10201)
5952 req->auto_pause =
5953 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04005954 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5955 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5956 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04005957 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04005958 req->enables |=
5959 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5960 } else {
5961 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5962 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5963 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5964 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5965 req->enables |=
5966 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04005967 if (bp->hwrm_spec_code >= 0x10201) {
5968 req->auto_pause = req->force_pause;
5969 req->enables |= cpu_to_le32(
5970 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5971 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005972 }
5973}
5974
static void bnxt_hwrm_set_link_common(struct bnxt *bp,
				      struct hwrm_port_phy_cfg_input *req)
{
	u8 autoneg = bp->link_info.autoneg;
	u16 fw_link_speed = bp->link_info.req_link_speed;
	u16 advertising = bp->link_info.advertising;

	if (autoneg & BNXT_AUTONEG_SPEED) {
		req->auto_mode |=
			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;

		req->enables |= cpu_to_le32(
			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
		req->auto_link_speed_mask = cpu_to_le16(advertising);

		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
		req->flags |=
			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
	} else {
		req->force_link_speed = cpu_to_le16(fw_link_speed);
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	}

	/* tell chimp that the setting takes effect immediately */
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}

int bnxt_hwrm_set_pause(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	bnxt_hwrm_set_pause_common(bp, &req);

	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
	    bp->link_info.force_link_chng)
		bnxt_hwrm_set_link_common(bp, &req);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
		/* since changing of pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon successful return of the phy_cfg command
		 */
		bp->link_info.pause =
		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
		bp->link_info.auto_pause_setting = 0;
		if (!bp->link_info.force_link_chng)
			bnxt_report_link(bp);
	}
	bp->link_info.force_link_chng = false;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_set_eee(struct bnxt *bp,
			      struct hwrm_port_phy_cfg_input *req)
{
	struct ethtool_eee *eee = &bp->eee;

	if (eee->eee_enabled) {
		u16 eee_speeds;
		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

		if (eee->tx_lpi_enabled)
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
		else
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

		req->flags |= cpu_to_le32(flags);
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
	}
}

6055int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04006056{
6057 struct hwrm_port_phy_cfg_input req = {0};
6058
6059 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6060 if (set_pause)
6061 bnxt_hwrm_set_pause_common(bp, &req);
6062
6063 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04006064
6065 if (set_eee)
6066 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04006067 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6068}
6069
Michael Chan33f7d552016-04-11 04:11:12 -04006070static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
6071{
6072 struct hwrm_port_phy_cfg_input req = {0};
6073
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04006074 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04006075 return 0;
6076
6077 if (pci_num_vf(bp->pdev))
6078 return 0;
6079
6080 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05006081 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04006082 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6083}
6084
Michael Chan5ad2cbe2017-01-13 01:32:03 -05006085static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
6086{
6087 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6088 struct hwrm_port_led_qcaps_input req = {0};
6089 struct bnxt_pf_info *pf = &bp->pf;
6090 int rc;
6091
6092 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
6093 return 0;
6094
6095 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
6096 req.port_id = cpu_to_le16(pf->port_id);
6097 mutex_lock(&bp->hwrm_cmd_lock);
6098 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6099 if (rc) {
6100 mutex_unlock(&bp->hwrm_cmd_lock);
6101 return rc;
6102 }
6103 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
6104 int i;
6105
6106 bp->num_leds = resp->num_leds;
6107 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
6108 bp->num_leds);
6109 for (i = 0; i < bp->num_leds; i++) {
6110 struct bnxt_led_info *led = &bp->leds[i];
6111 __le16 caps = led->led_state_caps;
6112
6113 if (!led->led_group_id ||
6114 !BNXT_LED_ALT_BLINK_CAP(caps)) {
6115 bp->num_leds = 0;
6116 break;
6117 }
6118 }
6119 }
6120 mutex_unlock(&bp->hwrm_cmd_lock);
6121 return 0;
6122}
6123
Michael Chan5282db62017-04-04 18:14:10 -04006124int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
6125{
6126 struct hwrm_wol_filter_alloc_input req = {0};
6127 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6128 int rc;
6129
6130 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
6131 req.port_id = cpu_to_le16(bp->pf.port_id);
6132 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
6133 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
6134 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
6135 mutex_lock(&bp->hwrm_cmd_lock);
6136 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6137 if (!rc)
6138 bp->wol_filter_id = resp->wol_filter_id;
6139 mutex_unlock(&bp->hwrm_cmd_lock);
6140 return rc;
6141}
6142
6143int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6144{
6145 struct hwrm_wol_filter_free_input req = {0};
6146 int rc;
6147
6148 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6149 req.port_id = cpu_to_le16(bp->pf.port_id);
6150 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6151 req.wol_filter_id = bp->wol_filter_id;
6152 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6153 return rc;
6154}
6155
Michael Chanc1ef1462017-04-04 18:14:07 -04006156static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6157{
6158 struct hwrm_wol_filter_qcfg_input req = {0};
6159 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6160 u16 next_handle = 0;
6161 int rc;
6162
6163 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6164 req.port_id = cpu_to_le16(bp->pf.port_id);
6165 req.handle = cpu_to_le16(handle);
6166 mutex_lock(&bp->hwrm_cmd_lock);
6167 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6168 if (!rc) {
6169 next_handle = le16_to_cpu(resp->next_handle);
6170 if (next_handle != 0) {
6171 if (resp->wol_type ==
6172 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6173 bp->wol = 1;
6174 bp->wol_filter_id = resp->wol_filter_id;
6175 }
6176 }
6177 }
6178 mutex_unlock(&bp->hwrm_cmd_lock);
6179 return next_handle;
6180}
6181
6182static void bnxt_get_wol_settings(struct bnxt *bp)
6183{
6184 u16 handle = 0;
6185
6186 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6187 return;
6188
6189 do {
6190 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6191 } while (handle && handle != 0xffff);
6192}
6193
Michael Chan939f7f02016-04-05 14:08:58 -04006194static bool bnxt_eee_config_ok(struct bnxt *bp)
6195{
6196 struct ethtool_eee *eee = &bp->eee;
6197 struct bnxt_link_info *link_info = &bp->link_info;
6198
6199 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6200 return true;
6201
6202 if (eee->eee_enabled) {
6203 u32 advertising =
6204 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6205
6206 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6207 eee->eee_enabled = 0;
6208 return false;
6209 }
6210 if (eee->advertised & ~advertising) {
6211 eee->advertised = advertising & eee->supported;
6212 return false;
6213 }
6214 }
6215 return true;
6216}
6217
Michael Chanc0c050c2015-10-22 16:01:17 -04006218static int bnxt_update_phy_setting(struct bnxt *bp)
6219{
6220 int rc;
6221 bool update_link = false;
6222 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04006223 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04006224 struct bnxt_link_info *link_info = &bp->link_info;
6225
6226 rc = bnxt_update_link(bp, true);
6227 if (rc) {
6228 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6229 rc);
6230 return rc;
6231 }
Michael Chan33dac242017-02-12 19:18:15 -05006232 if (!BNXT_SINGLE_PF(bp))
6233 return 0;
6234
Michael Chanc0c050c2015-10-22 16:01:17 -04006235 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04006236 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6237 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04006238 update_pause = true;
6239 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6240 link_info->force_pause_setting != link_info->req_flow_ctrl)
6241 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04006242 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6243 if (BNXT_AUTO_MODE(link_info->auto_mode))
6244 update_link = true;
6245 if (link_info->req_link_speed != link_info->force_link_speed)
6246 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05006247 if (link_info->req_duplex != link_info->duplex_setting)
6248 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04006249 } else {
6250 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6251 update_link = true;
6252 if (link_info->advertising != link_info->auto_link_speeds)
6253 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04006254 }
6255
Michael Chan16d663a2016-11-16 21:13:07 -05006256	/* The last close may have shut down the link, so we need to call
6257 	 * PHY_CFG to bring it back up.
6258 */
6259 if (!netif_carrier_ok(bp->dev))
6260 update_link = true;
6261
Michael Chan939f7f02016-04-05 14:08:58 -04006262 if (!bnxt_eee_config_ok(bp))
6263 update_eee = true;
6264
Michael Chanc0c050c2015-10-22 16:01:17 -04006265 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04006266 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04006267 else if (update_pause)
6268 rc = bnxt_hwrm_set_pause(bp);
6269 if (rc) {
6270 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6271 rc);
6272 return rc;
6273 }
6274
6275 return rc;
6276}
6277
Jeffrey Huang11809492015-11-05 16:25:49 -05006278/* Common routine to pre-map certain register blocks to a different GRC window.
6279 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
6280 * in the PF and 3 windows in the VF can be customized to map different
6281 * register blocks.
6282 */
6283static void bnxt_preset_reg_win(struct bnxt *bp)
6284{
6285 if (BNXT_PF(bp)) {
6286 /* CAG registers map to GRC window #4 */
6287 writel(BNXT_CAG_REG_BASE,
6288 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6289 }
6290}
6291
Michael Chanc0c050c2015-10-22 16:01:17 -04006292static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6293{
6294 int rc = 0;
6295
Jeffrey Huang11809492015-11-05 16:25:49 -05006296 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006297 netif_carrier_off(bp->dev);
6298 if (irq_re_init) {
6299 rc = bnxt_setup_int_mode(bp);
6300 if (rc) {
6301 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6302 rc);
6303 return rc;
6304 }
6305 }
6306 if ((bp->flags & BNXT_FLAG_RFS) &&
6307 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6308 /* disable RFS if falling back to INTA */
6309 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6310 bp->flags &= ~BNXT_FLAG_RFS;
6311 }
6312
6313 rc = bnxt_alloc_mem(bp, irq_re_init);
6314 if (rc) {
6315 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6316 goto open_err_free_mem;
6317 }
6318
6319 if (irq_re_init) {
6320 bnxt_init_napi(bp);
6321 rc = bnxt_request_irq(bp);
6322 if (rc) {
6323 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6324 goto open_err;
6325 }
6326 }
6327
6328 bnxt_enable_napi(bp);
6329
6330 rc = bnxt_init_nic(bp, irq_re_init);
6331 if (rc) {
6332 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6333 goto open_err;
6334 }
6335
6336 if (link_re_init) {
6337 rc = bnxt_update_phy_setting(bp);
6338 if (rc)
Michael Chanba41d462016-02-19 19:43:21 -05006339 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04006340 }
6341
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006342 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006343 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04006344
Michael Chancaefe522015-12-09 19:35:42 -05006345 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006346 bnxt_enable_int(bp);
6347 /* Enable TX queues */
6348 bnxt_tx_enable(bp);
6349 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04006350 /* Poll link status and check for SFP+ module status */
6351 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006352
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04006353 /* VF-reps may need to be re-opened after the PF is re-opened */
6354 if (BNXT_PF(bp))
6355 bnxt_vf_reps_open(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006356 return 0;
6357
6358open_err:
6359 bnxt_disable_napi(bp);
6360 bnxt_del_napi(bp);
6361
6362open_err_free_mem:
6363 bnxt_free_skbs(bp);
6364 bnxt_free_irq(bp);
6365 bnxt_free_mem(bp, true);
6366 return rc;
6367}
6368
6369/* rtnl_lock held */
6370int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6371{
6372 int rc = 0;
6373
6374 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6375 if (rc) {
6376 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6377 dev_close(bp->dev);
6378 }
6379 return rc;
6380}
6381
Michael Chanf7dc1ea2017-04-04 18:14:13 -04006382/* rtnl_lock held, open the NIC halfway by allocating all resources, but
6383 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
6384 * self-tests.
6385 */
6386int bnxt_half_open_nic(struct bnxt *bp)
6387{
6388 int rc = 0;
6389
6390 rc = bnxt_alloc_mem(bp, false);
6391 if (rc) {
6392 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6393 goto half_open_err;
6394 }
6395 rc = bnxt_init_nic(bp, false);
6396 if (rc) {
6397 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6398 goto half_open_err;
6399 }
6400 return 0;
6401
6402half_open_err:
6403 bnxt_free_skbs(bp);
6404 bnxt_free_mem(bp, false);
6405 dev_close(bp->dev);
6406 return rc;
6407}
6408
6409/* rtnl_lock held, this call can only be made after a previous successful
6410 * call to bnxt_half_open_nic().
6411 */
6412void bnxt_half_close_nic(struct bnxt *bp)
6413{
6414 bnxt_hwrm_resource_free(bp, false, false);
6415 bnxt_free_skbs(bp);
6416 bnxt_free_mem(bp, false);
6417}
6418
Michael Chanc0c050c2015-10-22 16:01:17 -04006419static int bnxt_open(struct net_device *dev)
6420{
6421 struct bnxt *bp = netdev_priv(dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04006422
Michael Chanc0c050c2015-10-22 16:01:17 -04006423 return __bnxt_open_nic(bp, true, true);
6424}
6425
Michael Chanf9b76eb2017-07-11 13:05:34 -04006426static bool bnxt_drv_busy(struct bnxt *bp)
6427{
6428 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
6429 test_bit(BNXT_STATE_READ_STATS, &bp->state));
6430}
6431
Michael Chanc0c050c2015-10-22 16:01:17 -04006432int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6433{
6434 int rc = 0;
6435
6436#ifdef CONFIG_BNXT_SRIOV
6437 if (bp->sriov_cfg) {
6438 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6439 !bp->sriov_cfg,
6440 BNXT_SRIOV_CFG_WAIT_TMO);
6441 if (rc)
6442 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6443 }
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04006444
6445 /* Close the VF-reps before closing PF */
6446 if (BNXT_PF(bp))
6447 bnxt_vf_reps_close(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006448#endif
6449 /* Change device state to avoid TX queue wake up's */
6450 	/* Change device state to avoid TX queue wake-ups */
6451
Michael Chancaefe522015-12-09 19:35:42 -05006452 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05006453 smp_mb__after_atomic();
Michael Chanf9b76eb2017-07-11 13:05:34 -04006454 while (bnxt_drv_busy(bp))
Michael Chan4cebdce2015-12-09 19:35:43 -05006455 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04006456
Michael Chan9d8bc092016-12-29 12:13:33 -05006457	/* Flush rings and disable interrupts */
Michael Chanc0c050c2015-10-22 16:01:17 -04006458 bnxt_shutdown_nic(bp, irq_re_init);
6459
6460 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6461
6462 bnxt_disable_napi(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006463 del_timer_sync(&bp->timer);
6464 bnxt_free_skbs(bp);
6465
6466 if (irq_re_init) {
6467 bnxt_free_irq(bp);
6468 bnxt_del_napi(bp);
6469 }
6470 bnxt_free_mem(bp, irq_re_init);
6471 return rc;
6472}
6473
6474static int bnxt_close(struct net_device *dev)
6475{
6476 struct bnxt *bp = netdev_priv(dev);
6477
6478 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04006479 bnxt_hwrm_shutdown_link(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006480 return 0;
6481}
6482
6483/* rtnl_lock held */
6484static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6485{
6486 switch (cmd) {
6487 case SIOCGMIIPHY:
6488 /* fallthru */
6489 case SIOCGMIIREG: {
6490 if (!netif_running(dev))
6491 return -EAGAIN;
6492
6493 return 0;
6494 }
6495
6496 case SIOCSMIIREG:
6497 if (!netif_running(dev))
6498 return -EAGAIN;
6499
6500 return 0;
6501
6502 default:
6503 /* do nothing */
6504 break;
6505 }
6506 return -EOPNOTSUPP;
6507}
6508
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006509static void
Michael Chanc0c050c2015-10-22 16:01:17 -04006510bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6511{
6512 u32 i;
6513 struct bnxt *bp = netdev_priv(dev);
6514
Michael Chanf9b76eb2017-07-11 13:05:34 -04006515 set_bit(BNXT_STATE_READ_STATS, &bp->state);
6516 /* Make sure bnxt_close_nic() sees that we are reading stats before
6517 * we check the BNXT_STATE_OPEN flag.
6518 */
6519 smp_mb__after_atomic();
6520 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6521 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006522 return;
Michael Chanf9b76eb2017-07-11 13:05:34 -04006523 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006524
6525 /* TODO check if we need to synchronize with bnxt_close path */
6526 for (i = 0; i < bp->cp_nr_rings; i++) {
6527 struct bnxt_napi *bnapi = bp->bnapi[i];
6528 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6529 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6530
6531 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6532 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6533 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6534
6535 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6536 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6537 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6538
6539 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6540 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6541 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6542
6543 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6544 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6545 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6546
6547 stats->rx_missed_errors +=
6548 le64_to_cpu(hw_stats->rx_discard_pkts);
6549
6550 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6551
Michael Chanc0c050c2015-10-22 16:01:17 -04006552 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6553 }
6554
Michael Chan9947f832016-03-07 15:38:46 -05006555 if (bp->flags & BNXT_FLAG_PORT_STATS) {
6556 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6557 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6558
6559 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6560 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6561 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6562 le64_to_cpu(rx->rx_ovrsz_frames) +
6563 le64_to_cpu(rx->rx_runt_frames);
6564 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6565 le64_to_cpu(rx->rx_jbr_frames);
6566 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6567 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6568 stats->tx_errors = le64_to_cpu(tx->tx_err);
6569 }
Michael Chanf9b76eb2017-07-11 13:05:34 -04006570 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006571}
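
/* Illustrative sketch, not part of the driver: the reader/closer
 * handshake used by bnxt_get_stats64() and bnxt_close_nic() (via
 * bnxt_drv_busy()), reduced to the reader side.  The paired barriers
 * guarantee that either the reader sees BNXT_STATE_OPEN cleared, or the
 * closer sees BNXT_STATE_READ_STATS set -- never neither.  The function
 * name is hypothetical.
 */
static bool bnxt_stats_read_begin_example(struct bnxt *bp)
{
	set_bit(BNXT_STATE_READ_STATS, &bp->state);
	smp_mb__after_atomic();		/* pairs with the barrier in close */
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
		return false;		/* device is closing, skip the read */
	}
	return true;			/* rings stay alive until we clear the bit */
}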
6572
6573static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6574{
6575 struct net_device *dev = bp->dev;
6576 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6577 struct netdev_hw_addr *ha;
6578 u8 *haddr;
6579 int mc_count = 0;
6580 bool update = false;
6581 int off = 0;
6582
6583 netdev_for_each_mc_addr(ha, dev) {
6584 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6585 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6586 vnic->mc_list_count = 0;
6587 return false;
6588 }
6589 haddr = ha->addr;
6590 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6591 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6592 update = true;
6593 }
6594 off += ETH_ALEN;
6595 mc_count++;
6596 }
6597 if (mc_count)
6598 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6599
6600 if (mc_count != vnic->mc_list_count) {
6601 vnic->mc_list_count = mc_count;
6602 update = true;
6603 }
6604 return update;
6605}
6606
6607static bool bnxt_uc_list_updated(struct bnxt *bp)
6608{
6609 struct net_device *dev = bp->dev;
6610 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6611 struct netdev_hw_addr *ha;
6612 int off = 0;
6613
6614 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6615 return true;
6616
6617 netdev_for_each_uc_addr(ha, dev) {
6618 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6619 return true;
6620
6621 off += ETH_ALEN;
6622 }
6623 return false;
6624}
6625
6626static void bnxt_set_rx_mode(struct net_device *dev)
6627{
6628 struct bnxt *bp = netdev_priv(dev);
6629 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6630 u32 mask = vnic->rx_mask;
6631 bool mc_update = false;
6632 bool uc_update;
6633
6634 if (!netif_running(dev))
6635 return;
6636
6637 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6638 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6639 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6640
Michael Chan17c71ac2016-07-01 18:46:27 -04006641 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04006642 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6643
6644 uc_update = bnxt_uc_list_updated(bp);
6645
6646 if (dev->flags & IFF_ALLMULTI) {
6647 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6648 vnic->mc_list_count = 0;
6649 } else {
6650 mc_update = bnxt_mc_list_updated(bp, &mask);
6651 }
6652
6653 if (mask != vnic->rx_mask || uc_update || mc_update) {
6654 vnic->rx_mask = mask;
6655
6656 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6657 schedule_work(&bp->sp_task);
6658 }
6659}
6660
Michael Chanb664f002015-12-02 01:54:08 -05006661static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006662{
6663 struct net_device *dev = bp->dev;
6664 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6665 struct netdev_hw_addr *ha;
6666 int i, off = 0, rc;
6667 bool uc_update;
6668
6669 netif_addr_lock_bh(dev);
6670 uc_update = bnxt_uc_list_updated(bp);
6671 netif_addr_unlock_bh(dev);
6672
6673 if (!uc_update)
6674 goto skip_uc;
6675
6676 mutex_lock(&bp->hwrm_cmd_lock);
6677 for (i = 1; i < vnic->uc_filter_count; i++) {
6678 struct hwrm_cfa_l2_filter_free_input req = {0};
6679
6680 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6681 -1);
6682
6683 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6684
6685 rc = _hwrm_send_message(bp, &req, sizeof(req),
6686 HWRM_CMD_TIMEOUT);
6687 }
6688 mutex_unlock(&bp->hwrm_cmd_lock);
6689
6690 vnic->uc_filter_count = 1;
6691
6692 netif_addr_lock_bh(dev);
6693 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6694 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6695 } else {
6696 netdev_for_each_uc_addr(ha, dev) {
6697 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6698 off += ETH_ALEN;
6699 vnic->uc_filter_count++;
6700 }
6701 }
6702 netif_addr_unlock_bh(dev);
6703
6704 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6705 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6706 if (rc) {
6707 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6708 rc);
6709 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05006710 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006711 }
6712 }
6713
6714skip_uc:
6715 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6716 if (rc)
6717 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6718 rc);
Michael Chanb664f002015-12-02 01:54:08 -05006719
6720 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006721}
6722
Michael Chan8079e8f2016-12-29 12:13:37 -05006723/* If the chip and firmware support RFS */
6724static bool bnxt_rfs_supported(struct bnxt *bp)
6725{
6726 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6727 return true;
Michael Chanae10ae72016-12-29 12:13:38 -05006728 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6729 return true;
Michael Chan8079e8f2016-12-29 12:13:37 -05006730 return false;
6731}
6732
6733/* If runtime conditions support RFS */
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006734static bool bnxt_rfs_capable(struct bnxt *bp)
6735{
6736#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05006737 int vnics, max_vnics, max_rss_ctxs;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006738
Michael Chan964fd482017-02-12 19:18:13 -05006739 if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006740 return false;
6741
6742 vnics = 1 + bp->rx_nr_rings;
Michael Chan8079e8f2016-12-29 12:13:37 -05006743 max_vnics = bnxt_get_max_func_vnics(bp);
6744 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
Michael Chanae10ae72016-12-29 12:13:38 -05006745
6746 /* RSS contexts not a limiting factor */
6747 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6748 max_rss_ctxs = max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05006749 if (vnics > max_vnics || vnics > max_rss_ctxs) {
Vasundhara Volama2304902016-07-25 12:33:36 -04006750 netdev_warn(bp->dev,
6751 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
Michael Chan8079e8f2016-12-29 12:13:37 -05006752 min(max_rss_ctxs - 1, max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006753 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04006754 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006755
6756 return true;
6757#else
6758 return false;
6759#endif
6760}
6761
Michael Chanc0c050c2015-10-22 16:01:17 -04006762static netdev_features_t bnxt_fix_features(struct net_device *dev,
6763 netdev_features_t features)
6764{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006765 struct bnxt *bp = netdev_priv(dev);
6766
Vasundhara Volama2304902016-07-25 12:33:36 -04006767 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006768 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04006769
6770 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
6771 * turned on or off together.
6772 */
6773 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6774 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6775 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6776 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6777 NETIF_F_HW_VLAN_STAG_RX);
6778 else
6779 features |= NETIF_F_HW_VLAN_CTAG_RX |
6780 NETIF_F_HW_VLAN_STAG_RX;
6781 }
Michael Chancf6645f2016-06-13 02:25:28 -04006782#ifdef CONFIG_BNXT_SRIOV
6783 if (BNXT_VF(bp)) {
6784 if (bp->vf.vlan) {
6785 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6786 NETIF_F_HW_VLAN_STAG_RX);
6787 }
6788 }
6789#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04006790 return features;
6791}
6792
6793static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6794{
6795 struct bnxt *bp = netdev_priv(dev);
6796 u32 flags = bp->flags;
6797 u32 changes;
6798 int rc = 0;
6799 bool re_init = false;
6800 bool update_tpa = false;
6801
6802 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006803 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04006804 flags |= BNXT_FLAG_GRO;
6805 if (features & NETIF_F_LRO)
6806 flags |= BNXT_FLAG_LRO;
6807
Michael Chanbdbd1eb2016-12-29 12:13:43 -05006808 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6809 flags &= ~BNXT_FLAG_TPA;
6810
Michael Chanc0c050c2015-10-22 16:01:17 -04006811 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6812 flags |= BNXT_FLAG_STRIP_VLAN;
6813
6814 if (features & NETIF_F_NTUPLE)
6815 flags |= BNXT_FLAG_RFS;
6816
6817 changes = flags ^ bp->flags;
6818 if (changes & BNXT_FLAG_TPA) {
6819 update_tpa = true;
6820 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6821 (flags & BNXT_FLAG_TPA) == 0)
6822 re_init = true;
6823 }
6824
6825 if (changes & ~BNXT_FLAG_TPA)
6826 re_init = true;
6827
6828 if (flags != bp->flags) {
6829 u32 old_flags = bp->flags;
6830
6831 bp->flags = flags;
6832
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006833 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006834 if (update_tpa)
6835 bnxt_set_ring_params(bp);
6836 return rc;
6837 }
6838
6839 if (re_init) {
6840 bnxt_close_nic(bp, false, false);
6841 if (update_tpa)
6842 bnxt_set_ring_params(bp);
6843
6844 return bnxt_open_nic(bp, false, false);
6845 }
6846 if (update_tpa) {
6847 rc = bnxt_set_tpa(bp,
6848 (flags & BNXT_FLAG_TPA) ?
6849 true : false);
6850 if (rc)
6851 bp->flags = old_flags;
6852 }
6853 }
6854 return rc;
6855}
6856
Michael Chan9f554592016-01-02 23:44:58 -05006857static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6858{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006859 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006860 int i = bnapi->index;
6861
Michael Chan3b2b7d92016-01-02 23:45:00 -05006862 if (!txr)
6863 return;
6864
Michael Chan9f554592016-01-02 23:44:58 -05006865 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6866 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6867 txr->tx_cons);
6868}
6869
6870static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6871{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006872 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006873 int i = bnapi->index;
6874
Michael Chan3b2b7d92016-01-02 23:45:00 -05006875 if (!rxr)
6876 return;
6877
Michael Chan9f554592016-01-02 23:44:58 -05006878 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6879 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6880 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6881 rxr->rx_sw_agg_prod);
6882}
6883
6884static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6885{
6886 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6887 int i = bnapi->index;
6888
6889 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6890 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6891}
6892
Michael Chanc0c050c2015-10-22 16:01:17 -04006893static void bnxt_dbg_dump_states(struct bnxt *bp)
6894{
6895 int i;
6896 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04006897
6898 for (i = 0; i < bp->cp_nr_rings; i++) {
6899 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04006900 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05006901 bnxt_dump_tx_sw_state(bnapi);
6902 bnxt_dump_rx_sw_state(bnapi);
6903 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04006904 }
6905 }
6906}
6907
Michael Chan6988bd92016-06-13 02:25:29 -04006908static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04006909{
Michael Chan6988bd92016-06-13 02:25:29 -04006910 if (!silent)
6911 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05006912 if (netif_running(bp->dev)) {
Michael Chanb386cd32017-03-08 18:44:33 -05006913 int rc;
6914
6915 if (!silent)
6916 bnxt_ulp_stop(bp);
Michael Chan028de142015-12-09 19:35:44 -05006917 bnxt_close_nic(bp, false, false);
Michael Chanb386cd32017-03-08 18:44:33 -05006918 rc = bnxt_open_nic(bp, false, false);
6919 if (!silent && !rc)
6920 bnxt_ulp_start(bp);
Michael Chan028de142015-12-09 19:35:44 -05006921 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006922}
6923
6924static void bnxt_tx_timeout(struct net_device *dev)
6925{
6926 struct bnxt *bp = netdev_priv(dev);
6927
6928 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6929 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6930 schedule_work(&bp->sp_task);
6931}
6932
6933#ifdef CONFIG_NET_POLL_CONTROLLER
6934static void bnxt_poll_controller(struct net_device *dev)
6935{
6936 struct bnxt *bp = netdev_priv(dev);
6937 int i;
6938
Michael Chan2270bc52017-06-23 14:01:01 -04006939 /* Only process tx rings/combined rings in netpoll mode. */
6940 for (i = 0; i < bp->tx_nr_rings; i++) {
6941 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04006942
Michael Chan2270bc52017-06-23 14:01:01 -04006943 napi_schedule(&txr->bnapi->napi);
Michael Chanc0c050c2015-10-22 16:01:17 -04006944 }
6945}
6946#endif
6947
6948static void bnxt_timer(unsigned long data)
6949{
6950 struct bnxt *bp = (struct bnxt *)data;
6951 struct net_device *dev = bp->dev;
6952
6953 if (!netif_running(dev))
6954 return;
6955
6956 if (atomic_read(&bp->intr_sem) != 0)
6957 goto bnxt_restart_timer;
6958
Michael Chanadcc3312017-07-24 12:34:24 -04006959 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6960 bp->stats_coal_ticks) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05006961 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6962 schedule_work(&bp->sp_task);
6963 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006964bnxt_restart_timer:
6965 mod_timer(&bp->timer, jiffies + bp->current_interval);
6966}
6967
Michael Chana551ee92017-01-25 02:55:07 -05006968static void bnxt_rtnl_lock_sp(struct bnxt *bp)
Michael Chan6988bd92016-06-13 02:25:29 -04006969{
Michael Chana551ee92017-01-25 02:55:07 -05006970 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6971 * set. If the device is being closed, bnxt_close() may be holding
Michael Chan6988bd92016-06-13 02:25:29 -04006972	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
6973 	 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
6974 */
6975 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6976 rtnl_lock();
Michael Chana551ee92017-01-25 02:55:07 -05006977}
6978
6979static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6980{
Michael Chan6988bd92016-06-13 02:25:29 -04006981 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6982 rtnl_unlock();
6983}
6984
Michael Chana551ee92017-01-25 02:55:07 -05006985/* Only called from bnxt_sp_task() */
6986static void bnxt_reset(struct bnxt *bp, bool silent)
6987{
6988 bnxt_rtnl_lock_sp(bp);
6989 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6990 bnxt_reset_task(bp, silent);
6991 bnxt_rtnl_unlock_sp(bp);
6992}
6993
Michael Chanc0c050c2015-10-22 16:01:17 -04006994static void bnxt_cfg_ntp_filters(struct bnxt *);
6995
6996static void bnxt_sp_task(struct work_struct *work)
6997{
6998 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04006999
Michael Chan4cebdce2015-12-09 19:35:43 -05007000 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7001 smp_mb__after_atomic();
7002 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7003 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04007004 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05007005 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007006
7007 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
7008 bnxt_cfg_rx_mode(bp);
7009
7010 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
7011 bnxt_cfg_ntp_filters(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007012 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
7013 bnxt_hwrm_exec_fwd_req(bp);
7014 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7015 bnxt_hwrm_tunnel_dst_port_alloc(
7016 bp, bp->vxlan_port,
7017 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7018 }
7019 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7020 bnxt_hwrm_tunnel_dst_port_free(
7021 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7022 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07007023 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7024 bnxt_hwrm_tunnel_dst_port_alloc(
7025 bp, bp->nge_port,
7026 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7027 }
7028 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7029 bnxt_hwrm_tunnel_dst_port_free(
7030 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7031 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05007032 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7033 bnxt_hwrm_port_qstats(bp);
7034
Michael Chana551ee92017-01-25 02:55:07 -05007035 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7036 * must be the last functions to be called before exiting.
7037 */
Michael Chan0eaa24b2017-01-25 02:55:08 -05007038 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7039 int rc = 0;
7040
7041 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7042 &bp->sp_event))
7043 bnxt_hwrm_phy_qcaps(bp);
7044
7045 bnxt_rtnl_lock_sp(bp);
7046 if (test_bit(BNXT_STATE_OPEN, &bp->state))
7047 rc = bnxt_update_link(bp, true);
7048 bnxt_rtnl_unlock_sp(bp);
7049 if (rc)
7050 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7051 rc);
7052 }
Michael Chan90c694b2017-01-25 02:55:09 -05007053 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7054 bnxt_rtnl_lock_sp(bp);
7055 if (test_bit(BNXT_STATE_OPEN, &bp->state))
7056 bnxt_get_port_module_status(bp);
7057 bnxt_rtnl_unlock_sp(bp);
7058 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007059 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7060 bnxt_reset(bp, false);
7061
7062 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
7063 bnxt_reset(bp, true);
7064
Michael Chanc0c050c2015-10-22 16:01:17 -04007065 smp_mb__before_atomic();
7066 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7067}
7068
Michael Chand1e79252017-02-06 16:55:38 -05007069/* Under rtnl_lock */
Michael Chan98fdbe72017-08-28 13:40:26 -04007070int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7071 int tx_xdp)
Michael Chand1e79252017-02-06 16:55:38 -05007072{
7073 int max_rx, max_tx, tx_sets = 1;
7074 int tx_rings_needed;
Michael Chand1e79252017-02-06 16:55:38 -05007075 int rc;
7076
Michael Chand1e79252017-02-06 16:55:38 -05007077 if (tcs)
7078 tx_sets = tcs;
7079
7080 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
7081 if (rc)
7082 return rc;
7083
7084 if (max_rx < rx)
7085 return -ENOMEM;
7086
Michael Chan5f449242017-02-06 16:55:40 -05007087 tx_rings_needed = tx * tx_sets + tx_xdp;
Michael Chand1e79252017-02-06 16:55:38 -05007088 if (max_tx < tx_rings_needed)
7089 return -ENOMEM;
7090
Michael Chan98fdbe72017-08-28 13:40:26 -04007091 return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
Michael Chand1e79252017-02-06 16:55:38 -05007092}
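
/* Illustrative sketch, not part of the driver: the TX ring arithmetic
 * used above.  Each TC needs its own set of TX rings, and XDP TX rings
 * come on top.  E.g. 8 TX rings per TC, 2 TCs and XDP on 8 rings needs
 * 8 * 2 + 8 = 24 hardware TX rings.  The helper name is hypothetical.
 */
static int bnxt_tx_rings_needed_example(int tx, int tcs, int tx_xdp)
{
	int tx_sets = tcs ? tcs : 1;	/* zero TCs still needs one set */

	return tx * tx_sets + tx_xdp;
}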
7093
Sathya Perla17086392017-02-20 19:25:18 -05007094static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
7095{
7096 if (bp->bar2) {
7097 pci_iounmap(pdev, bp->bar2);
7098 bp->bar2 = NULL;
7099 }
7100
7101 if (bp->bar1) {
7102 pci_iounmap(pdev, bp->bar1);
7103 bp->bar1 = NULL;
7104 }
7105
7106 if (bp->bar0) {
7107 pci_iounmap(pdev, bp->bar0);
7108 bp->bar0 = NULL;
7109 }
7110}
7111
7112static void bnxt_cleanup_pci(struct bnxt *bp)
7113{
7114 bnxt_unmap_bars(bp, bp->pdev);
7115 pci_release_regions(bp->pdev);
7116 pci_disable_device(bp->pdev);
7117}
7118
Michael Chanc0c050c2015-10-22 16:01:17 -04007119static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
7120{
7121 int rc;
7122 struct bnxt *bp = netdev_priv(dev);
7123
7124 SET_NETDEV_DEV(dev, &pdev->dev);
7125
7126 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7127 rc = pci_enable_device(pdev);
7128 if (rc) {
7129 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7130 goto init_err;
7131 }
7132
7133 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7134 dev_err(&pdev->dev,
7135 "Cannot find PCI device base address, aborting\n");
7136 rc = -ENODEV;
7137 goto init_err_disable;
7138 }
7139
7140 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7141 if (rc) {
7142 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7143 goto init_err_disable;
7144 }
7145
7146 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
7147 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7148 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;	/* pci_request_regions() succeeded, so rc is still 0 here */
7149 		goto init_err_disable;
7150 }
7151
7152 pci_set_master(pdev);
7153
7154 bp->dev = dev;
7155 bp->pdev = pdev;
7156
7157 bp->bar0 = pci_ioremap_bar(pdev, 0);
7158 if (!bp->bar0) {
7159 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7160 rc = -ENOMEM;
7161 goto init_err_release;
7162 }
7163
7164 bp->bar1 = pci_ioremap_bar(pdev, 2);
7165 if (!bp->bar1) {
7166 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7167 rc = -ENOMEM;
7168 goto init_err_release;
7169 }
7170
7171 bp->bar2 = pci_ioremap_bar(pdev, 4);
7172 if (!bp->bar2) {
7173 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7174 rc = -ENOMEM;
7175 goto init_err_release;
7176 }
7177
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007178 pci_enable_pcie_error_reporting(pdev);
7179
Michael Chanc0c050c2015-10-22 16:01:17 -04007180 INIT_WORK(&bp->sp_task, bnxt_sp_task);
7181
7182 spin_lock_init(&bp->ntp_fltr_lock);
7183
7184 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
7185 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
7186
Michael Chandfb5b892016-02-26 04:00:01 -05007187	/* tick values in microseconds */
Michael Chandfc9c942016-02-26 04:00:03 -05007188 bp->rx_coal_ticks = 12;
7189 bp->rx_coal_bufs = 30;
Michael Chandfb5b892016-02-26 04:00:01 -05007190 bp->rx_coal_ticks_irq = 1;
7191 bp->rx_coal_bufs_irq = 2;
Michael Chanc0c050c2015-10-22 16:01:17 -04007192
Michael Chandfc9c942016-02-26 04:00:03 -05007193 bp->tx_coal_ticks = 25;
7194 bp->tx_coal_bufs = 30;
7195 bp->tx_coal_ticks_irq = 2;
7196 bp->tx_coal_bufs_irq = 2;
7197
Michael Chan51f30782016-07-01 18:46:29 -04007198 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7199
Michael Chanc0c050c2015-10-22 16:01:17 -04007200 init_timer(&bp->timer);
7201 bp->timer.data = (unsigned long)bp;
7202 bp->timer.function = bnxt_timer;
7203 bp->current_interval = BNXT_TIMER_INTERVAL;
7204
Michael Chancaefe522015-12-09 19:35:42 -05007205 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04007206 return 0;
7207
7208init_err_release:
Sathya Perla17086392017-02-20 19:25:18 -05007209 bnxt_unmap_bars(bp, pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04007210 pci_release_regions(pdev);
7211
7212init_err_disable:
7213 pci_disable_device(pdev);
7214
7215init_err:
7216 return rc;
7217}
7218
7219/* rtnl_lock held */
7220static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7221{
7222 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05007223 struct bnxt *bp = netdev_priv(dev);
7224 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007225
7226 if (!is_valid_ether_addr(addr->sa_data))
7227 return -EADDRNOTAVAIL;
7228
Michael Chan84c33dd2016-04-11 04:11:13 -04007229 rc = bnxt_approve_mac(bp, addr->sa_data);
7230 if (rc)
7231 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04007232
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05007233 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7234 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007235
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05007236 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7237 if (netif_running(dev)) {
7238 bnxt_close_nic(bp, false, false);
7239 rc = bnxt_open_nic(bp, false, false);
7240 }
7241
7242 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04007243}
7244
7245/* rtnl_lock held */
7246static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7247{
7248 struct bnxt *bp = netdev_priv(dev);
7249
Michael Chanc0c050c2015-10-22 16:01:17 -04007250 if (netif_running(dev))
7251 bnxt_close_nic(bp, false, false);
7252
7253 dev->mtu = new_mtu;
7254 bnxt_set_ring_params(bp);
7255
7256 if (netif_running(dev))
7257 return bnxt_open_nic(bp, false, false);
7258
7259 return 0;
7260}
7261
Michael Chanc5e3deb2016-12-02 21:17:15 -05007262int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
Michael Chanc0c050c2015-10-22 16:01:17 -04007263{
7264 struct bnxt *bp = netdev_priv(dev);
Michael Chan3ffb6a32016-11-11 00:11:42 -05007265 bool sh = false;
Michael Chand1e79252017-02-06 16:55:38 -05007266 int rc;
John Fastabend16e5cc62016-02-16 21:16:43 -08007267
Michael Chanc0c050c2015-10-22 16:01:17 -04007268 if (tc > bp->max_tc) {
Michael Chanb451c8b2017-02-12 19:18:17 -05007269 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04007270 tc, bp->max_tc);
7271 return -EINVAL;
7272 }
7273
7274 if (netdev_get_num_tc(dev) == tc)
7275 return 0;
7276
Michael Chan3ffb6a32016-11-11 00:11:42 -05007277 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7278 sh = true;
7279
Michael Chan98fdbe72017-08-28 13:40:26 -04007280 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7281 sh, tc, bp->tx_nr_rings_xdp);
Michael Chand1e79252017-02-06 16:55:38 -05007282 if (rc)
7283 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04007284
7285 /* Needs to close the device and do hw resource re-allocations */
7286 if (netif_running(bp->dev))
7287 bnxt_close_nic(bp, true, false);
7288
7289 if (tc) {
7290 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7291 netdev_set_num_tc(dev, tc);
7292 } else {
7293 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7294 netdev_reset_tc(dev);
7295 }
Michael Chan3ffb6a32016-11-11 00:11:42 -05007296 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7297 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007298 bp->num_stat_ctxs = bp->cp_nr_rings;
7299
7300 if (netif_running(bp->dev))
7301 return bnxt_open_nic(bp, true, false);
7302
7303 return 0;
7304}
7305
Jiri Pirko2572ac52017-08-07 10:15:17 +02007306static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02007307 void *type_data)
Michael Chanc5e3deb2016-12-02 21:17:15 -05007308{
Jiri Pirkode4784c2017-08-07 10:15:32 +02007309 struct tc_mqprio_qopt *mqprio = type_data;
7310
Jiri Pirko2572ac52017-08-07 10:15:17 +02007311 if (type != TC_SETUP_MQPRIO)
Jiri Pirko38cf0422017-08-07 10:15:31 +02007312 return -EOPNOTSUPP;
Michael Chanc5e3deb2016-12-02 21:17:15 -05007313
Jiri Pirkode4784c2017-08-07 10:15:32 +02007314 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
Amritha Nambiar56f36ac2017-03-15 10:39:25 -07007315
Jiri Pirkode4784c2017-08-07 10:15:32 +02007316 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
Michael Chanc5e3deb2016-12-02 21:17:15 -05007317}
7318
Michael Chanc0c050c2015-10-22 16:01:17 -04007319#ifdef CONFIG_RFS_ACCEL
7320static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7321 struct bnxt_ntuple_filter *f2)
7322{
7323 struct flow_keys *keys1 = &f1->fkeys;
7324 struct flow_keys *keys2 = &f2->fkeys;
7325
7326 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
7327 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
7328 keys1->ports.ports == keys2->ports.ports &&
7329 keys1->basic.ip_proto == keys2->basic.ip_proto &&
7330 keys1->basic.n_proto == keys2->basic.n_proto &&
Michael Chan61aad722017-02-12 19:18:14 -05007331 keys1->control.flags == keys2->control.flags &&
Michael Chana54c4d72016-07-25 12:33:35 -04007332 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
7333 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
Michael Chanc0c050c2015-10-22 16:01:17 -04007334 return true;
7335
7336 return false;
7337}
7338
7339static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7340 u16 rxq_index, u32 flow_id)
7341{
7342 struct bnxt *bp = netdev_priv(dev);
7343 struct bnxt_ntuple_filter *fltr, *new_fltr;
7344 struct flow_keys *fkeys;
7345 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chana54c4d72016-07-25 12:33:35 -04007346 int rc = 0, idx, bit_id, l2_idx = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007347 struct hlist_head *head;
7348
Michael Chana54c4d72016-07-25 12:33:35 -04007349 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7350 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7351 int off = 0, j;
7352
7353 netif_addr_lock_bh(dev);
7354 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7355 if (ether_addr_equal(eth->h_dest,
7356 vnic->uc_list + off)) {
7357 l2_idx = j + 1;
7358 break;
7359 }
7360 }
7361 netif_addr_unlock_bh(dev);
7362 if (!l2_idx)
7363 return -EINVAL;
7364 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007365 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7366 if (!new_fltr)
7367 return -ENOMEM;
7368
7369 fkeys = &new_fltr->fkeys;
7370 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7371 rc = -EPROTONOSUPPORT;
7372 goto err_free;
7373 }
7374
Michael Chandda0e742016-12-29 12:13:40 -05007375 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7376 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
Michael Chanc0c050c2015-10-22 16:01:17 -04007377 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7378 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7379 rc = -EPROTONOSUPPORT;
7380 goto err_free;
7381 }
Michael Chandda0e742016-12-29 12:13:40 -05007382 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7383 bp->hwrm_spec_code < 0x10601) {
7384 rc = -EPROTONOSUPPORT;
7385 goto err_free;
7386 }
Michael Chan61aad722017-02-12 19:18:14 -05007387 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7388 bp->hwrm_spec_code < 0x10601) {
7389 rc = -EPROTONOSUPPORT;
7390 goto err_free;
7391 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007392
Michael Chana54c4d72016-07-25 12:33:35 -04007393 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04007394 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7395
7396 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7397 head = &bp->ntp_fltr_hash_tbl[idx];
7398 rcu_read_lock();
7399 hlist_for_each_entry_rcu(fltr, head, hash) {
7400 if (bnxt_fltr_match(fltr, new_fltr)) {
7401 rcu_read_unlock();
7402 rc = 0;
7403 goto err_free;
7404 }
7405 }
7406 rcu_read_unlock();
7407
7408 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05007409 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7410 BNXT_NTP_FLTR_MAX_FLTR, 0);
7411 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007412 spin_unlock_bh(&bp->ntp_fltr_lock);
7413 rc = -ENOMEM;
7414 goto err_free;
7415 }
7416
Michael Chan84e86b92015-11-05 16:25:50 -05007417 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04007418 new_fltr->flow_id = flow_id;
Michael Chana54c4d72016-07-25 12:33:35 -04007419 new_fltr->l2_fltr_idx = l2_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04007420 new_fltr->rxq = rxq_index;
7421 hlist_add_head_rcu(&new_fltr->hash, head);
7422 bp->ntp_fltr_count++;
7423 spin_unlock_bh(&bp->ntp_fltr_lock);
7424
7425 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7426 schedule_work(&bp->sp_task);
7427
7428 return new_fltr->sw_id;
7429
7430err_free:
7431 kfree(new_fltr);
7432 return rc;
7433}
7434
7435static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7436{
7437 int i;
7438
7439 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7440 struct hlist_head *head;
7441 struct hlist_node *tmp;
7442 struct bnxt_ntuple_filter *fltr;
7443 int rc;
7444
7445 head = &bp->ntp_fltr_hash_tbl[i];
7446 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7447 bool del = false;
7448
7449 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7450 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7451 fltr->flow_id,
7452 fltr->sw_id)) {
7453 bnxt_hwrm_cfa_ntuple_filter_free(bp,
7454 fltr);
7455 del = true;
7456 }
7457 } else {
7458 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7459 fltr);
7460 if (rc)
7461 del = true;
7462 else
7463 set_bit(BNXT_FLTR_VALID, &fltr->state);
7464 }
7465
7466 if (del) {
7467 spin_lock_bh(&bp->ntp_fltr_lock);
7468 hlist_del_rcu(&fltr->hash);
7469 bp->ntp_fltr_count--;
7470 spin_unlock_bh(&bp->ntp_fltr_lock);
7471 synchronize_rcu();
7472 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7473 kfree(fltr);
7474 }
7475 }
7476 }
Jeffrey Huang19241362016-02-26 04:00:00 -05007477 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7478 netdev_info(bp->dev, "Receive PF driver unload event!");
Michael Chanc0c050c2015-10-22 16:01:17 -04007479}
7480
7481#else
7482
7483static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7484{
7485}
7486
7487#endif /* CONFIG_RFS_ACCEL */
7488
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007489static void bnxt_udp_tunnel_add(struct net_device *dev,
7490 struct udp_tunnel_info *ti)
Michael Chanc0c050c2015-10-22 16:01:17 -04007491{
7492 struct bnxt *bp = netdev_priv(dev);
7493
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007494 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7495 return;
7496
Michael Chanc0c050c2015-10-22 16:01:17 -04007497 if (!netif_running(dev))
7498 return;
7499
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007500 switch (ti->type) {
7501 case UDP_TUNNEL_TYPE_VXLAN:
7502 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7503 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04007504
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007505 bp->vxlan_port_cnt++;
7506 if (bp->vxlan_port_cnt == 1) {
7507 bp->vxlan_port = ti->port;
7508 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
Michael Chanc0c050c2015-10-22 16:01:17 -04007509 schedule_work(&bp->sp_task);
7510 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007511 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07007512 case UDP_TUNNEL_TYPE_GENEVE:
7513 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7514 return;
7515
7516 bp->nge_port_cnt++;
7517 if (bp->nge_port_cnt == 1) {
7518 bp->nge_port = ti->port;
7519 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7520 }
7521 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007522 default:
7523 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04007524 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007525
7526 schedule_work(&bp->sp_task);
7527}
7528
7529static void bnxt_udp_tunnel_del(struct net_device *dev,
7530 struct udp_tunnel_info *ti)
7531{
7532 struct bnxt *bp = netdev_priv(dev);
7533
7534 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7535 return;
7536
7537 if (!netif_running(dev))
7538 return;
7539
7540 switch (ti->type) {
7541 case UDP_TUNNEL_TYPE_VXLAN:
7542 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7543 return;
7544 bp->vxlan_port_cnt--;
7545
7546 if (bp->vxlan_port_cnt != 0)
7547 return;
7548
7549 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7550 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07007551 case UDP_TUNNEL_TYPE_GENEVE:
7552 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7553 return;
7554 bp->nge_port_cnt--;
7555
7556 if (bp->nge_port_cnt != 0)
7557 return;
7558
7559 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7560 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07007561 default:
7562 return;
7563 }
7564
7565 schedule_work(&bp->sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04007566}
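
/* Illustrative sketch, not part of the driver: the single-slot,
 * refcounted tunnel-port bookkeeping used by the add/del handlers above.
 * Only the first add (and, symmetrically, the last delete) should touch
 * the hardware; a second, different port is ignored because the NIC
 * tracks one port per tunnel type.  The helper name is hypothetical.
 */
static bool bnxt_tunnel_port_add_example(u16 *cnt, __be16 *slot, __be16 port)
{
	if (*cnt && *slot != port)
		return false;		/* another port already claimed */
	(*cnt)++;
	if (*cnt == 1) {
		*slot = port;
		return true;		/* caller schedules the HWRM add */
	}
	return false;			/* already programmed, just count */
}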
7567
Michael Chan39d8ba22017-07-24 12:34:22 -04007568static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7569 struct net_device *dev, u32 filter_mask,
7570 int nlflags)
7571{
7572 struct bnxt *bp = netdev_priv(dev);
7573
7574 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
7575 nlflags, filter_mask, NULL);
7576}
7577
7578static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7579 u16 flags)
7580{
7581 struct bnxt *bp = netdev_priv(dev);
7582 struct nlattr *attr, *br_spec;
7583 int rem, rc = 0;
7584
7585 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
7586 return -EOPNOTSUPP;
7587
7588 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7589 if (!br_spec)
7590 return -EINVAL;
7591
7592 nla_for_each_nested(attr, br_spec, rem) {
7593 u16 mode;
7594
7595 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7596 continue;
7597
7598 if (nla_len(attr) < sizeof(mode))
7599 return -EINVAL;
7600
7601 mode = nla_get_u16(attr);
7602 if (mode == bp->br_mode)
7603 break;
7604
7605 rc = bnxt_hwrm_set_br_mode(bp, mode);
7606 if (!rc)
7607 bp->br_mode = mode;
7608 break;
7609 }
7610 return rc;
7611}
7612
Sathya Perlac124a622017-07-24 12:34:29 -04007613static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
7614 size_t len)
7615{
7616 struct bnxt *bp = netdev_priv(dev);
7617 int rc;
7618
7619 	/* The PF and its VF-reps only support the switchdev framework */
7620 if (!BNXT_PF(bp))
7621 return -EOPNOTSUPP;
7622
Sathya Perla53f70b82017-07-25 13:28:41 -04007623 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
Sathya Perlac124a622017-07-24 12:34:29 -04007624
7625 if (rc >= len)
7626 return -EOPNOTSUPP;
7627 return 0;
7628}
7629
7630int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
7631{
7632 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
7633 return -EOPNOTSUPP;
7634
7635 	/* The PF and its VF-reps only support the switchdev framework */
7636 if (!BNXT_PF(bp))
7637 return -EOPNOTSUPP;
7638
7639 switch (attr->id) {
7640 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
7641 		/* In SRIOV, each PF-pool (PF + child VFs) serves as a
7642 		 * switching domain, so the PF's permanent MAC address can
7643 		 * be used as the unique parent-id
7644 */
7645 attr->u.ppid.id_len = ETH_ALEN;
7646 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
7647 break;
7648 default:
7649 return -EOPNOTSUPP;
7650 }
7651 return 0;
7652}
7653
7654static int bnxt_swdev_port_attr_get(struct net_device *dev,
7655 struct switchdev_attr *attr)
7656{
7657 return bnxt_port_attr_get(netdev_priv(dev), attr);
7658}
7659
7660static const struct switchdev_ops bnxt_switchdev_ops = {
7661 .switchdev_port_attr_get = bnxt_swdev_port_attr_get
7662};
7663
static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_xdp		= bnxt_xdp,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_phys_port_name = bnxt_get_phys_port_name
};

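/* PCI .remove handler: undo bnxt_init_one() in roughly reverse order --
 * SR-IOV and devlink teardown first, then netdev unregistration and
 * outstanding slow-path work, then interrupts, HWRM resources and
 * finally the PCI state and the netdev itself.
 */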
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp)) {
		bnxt_sriov_disable(bp);
		bnxt_dl_unregister(bp);
	}

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	if (bp->xdp_prog)
		bpf_prog_put(bp->xdp_prog);
	bnxt_cleanup_pci(bp);
	free_netdev(dev);
}

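/* One-time PHY setup at probe: query PHY capabilities and the current
 * link, then seed the ethtool link settings (autoneg, advertised
 * speeds, flow control) from the firmware/NVM defaults.
 */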
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* Initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

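/* Maximum number of MSI-X vectors the function can request, taken from
 * the table-size field of the MSI-X capability; 1 if MSI-X is absent.
 */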
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

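/* Work out the upper bounds on RX, TX and completion rings from the PF
 * or VF resource limits reported by firmware.  RX is halved when
 * aggregation rings are in use (two hardware rings per RX queue) and is
 * further capped by the available hardware ring groups.
 */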
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

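/* Like bnxt_get_max_rings(), with two fallbacks for constrained
 * configurations: retry without aggregation rings (disabling LRO) if
 * the first attempt fails, and set aside a minimum share of completion
 * rings, stat contexts and IRQs for RoCE when the device supports it.
 */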
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc)
			return rc;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~NETIF_F_LRO;
		bp->dev->features &= ~NETIF_F_LRO;
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

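/* Pick the default ring counts at probe time: the smaller of the RSS
 * default queue count and the hardware limits, then reserve the TX
 * rings with firmware.  Nitro A0 needs one extra RX and completion
 * ring.
 */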
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);

	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

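/* Re-read the PF's resource limits from firmware and re-subtract the
 * resources reserved for the RoCE ULP.
 */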
void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

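/* Log the negotiated PCIe link speed and width, or a warning if they
 * cannot be determined.
 */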
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

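/* PCI .probe handler: allocate the netdev, bring up the HWRM command
 * channel, reset and query the function, size the default rings, set
 * up the interrupt mode and register the netdev (plus devlink on the
 * PF).
 */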
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = BNXT_MAX_MTU;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_P4_PLUS(bp))
		bp->gro_func = bnxt_gro_func_5731x;
	else
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_clr_int;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_clr_int:
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

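/* PCI .shutdown handler: close the netdev; on power-off also quiesce
 * the RoCE ULP, disable the interrupt mode and arm wake-on-LAN before
 * entering D3hot.
 */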
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_ulp_shutdown(bp);
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
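/* System sleep: suspend detaches and closes the netdev and unregisters
 * the driver with firmware; resume re-registers with firmware, resets
 * the function and reopens the netdev.
 */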
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);