/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

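/* Lookup table mapping packet length (in 512-byte units) to the TX BD
 * length hint flags placed in the descriptor for the chip.
 */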
static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

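/* Main transmit routine.  Builds one long TX BD plus an extended BD (and
 * one BD per fragment) for the skb and rings the TX doorbell.  Small
 * packets on an otherwise empty ring are written directly into the push
 * buffer instead of being DMA mapped.
 */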
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

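/* Reclaim completed TX descriptors: unmap DMA buffers, free the skbs,
 * report completed bytes/packets to BQL, and wake the queue if it was
 * stopped and enough descriptors are now free.
 */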
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

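/* Allocate and DMA map a receive buffer.  Returns the kernel virtual
 * address and fills in *mapping with the DMA address, or NULL on failure.
 */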
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

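/* Allocate a page (or a BNXT_RX_PAGE_SIZE fraction of one when the system
 * page is larger), DMA map it, and install it in the aggregation ring at
 * the given producer index.
 */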
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

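/* Recycle aggregation buffers referenced by the completion ring back into
 * the aggregation ring, typically after an error or an aborted TPA packet.
 */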
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

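/* Attach the aggregation ring pages belonging to this packet to the skb
 * as page fragments.  On allocation failure the skb is freed and the
 * remaining pages are recycled.
 */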
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

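/* Copy a small received packet into a freshly allocated skb so the
 * original DMA buffer can stay in the ring and be reused.
 */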
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

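/* Advance the completion ring consumer index past a packet that is being
 * dropped.  Returns -EBUSY if the aggregation completions have not all
 * arrived yet.
 */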
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}

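/* Handle a TPA_START completion: stash the current RX buffer and its
 * metadata in the per-aggregation tpa_info and recycle buffers so the
 * ring stays full while the hardware aggregates the rest of the flow.
 */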
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

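/* GRO completion handler for 5731x-class chips: locate the inner headers
 * using the offsets carried in the TPA start completion, seed the TCP
 * pseudo-header checksum, and flag UDP tunnel GSO types.
 */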
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

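/* GRO completion handler for 5730x-class chips: the inner header offsets
 * are derived from the payload offset reported in the TPA end completion
 * rather than from per-aggregation header info.
 */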
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
#endif
	return skb;
}

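/* Handle a TPA_END completion: build an skb for the aggregated packet,
 * attach any aggregation pages, restore VLAN and checksum metadata, and
 * hand the result to the chip-specific GRO completion routine.
 */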
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1 - 1 packet successfully received
 * 0 - successful TPA_START, packet not completed yet
 * -EBUSY - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

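/* Decode a firmware asynchronous event completion and set the matching
 * sp_event bit(s) so the slow-path workqueue can act on it.
 */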
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

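/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * schedule NAPI for this ring group.
 */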
1571static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1572{
1573 struct bnxt_napi *bnapi = dev_instance;
1574 struct bnxt *bp = bnapi->bp;
1575 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1576 u32 cons = RING_CMP(cpr->cp_raw_cons);
1577
1578 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1579 napi_schedule(&bnapi->napi);
1580 return IRQ_HANDLED;
1581}
1582
1583static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1584{
1585 u32 raw_cons = cpr->cp_raw_cons;
1586 u16 cons = RING_CMP(raw_cons);
1587 struct tx_cmp *txcmp;
1588
1589 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1590
1591 return TX_CMP_VALID(txcmp, raw_cons);
1592}
1593
Michael Chanc0c050c2015-10-22 16:01:17 -04001594static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1595{
1596 struct bnxt_napi *bnapi = dev_instance;
1597 struct bnxt *bp = bnapi->bp;
1598 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1599 u32 cons = RING_CMP(cpr->cp_raw_cons);
1600 u32 int_status;
1601
1602 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1603
1604 if (!bnxt_has_work(bp, cpr)) {
Jeffrey Huang11809492015-11-05 16:25:49 -05001605 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
Michael Chanc0c050c2015-10-22 16:01:17 -04001606 /* return if erroneous interrupt */
1607 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1608 return IRQ_NONE;
1609 }
1610
1611 /* disable ring IRQ */
1612 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1613
1614 /* Return here if interrupt is shared and is disabled. */
1615 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1616 return IRQ_HANDLED;
1617
1618 napi_schedule(&bnapi->napi);
1619 return IRQ_HANDLED;
1620}
1621
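/* Core completion ring processing: count TX completions, pass RX
 * completions to bnxt_rx_pkt() and dispatch HWRM/async completions,
 * stopping once the RX budget is consumed.  The completion ring is
 * ACKed before the TX ring is reaped and new RX/agg buffers are posted.
 */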
1622static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1623{
1624 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1625 u32 raw_cons = cpr->cp_raw_cons;
1626 u32 cons;
1627 int tx_pkts = 0;
1628 int rx_pkts = 0;
1629 bool rx_event = false;
1630 bool agg_event = false;
1631 struct tx_cmp *txcmp;
1632
1633 while (1) {
1634 int rc;
1635
1636 cons = RING_CMP(raw_cons);
1637 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1638
1639 if (!TX_CMP_VALID(txcmp, raw_cons))
1640 break;
1641
Michael Chan67a95e22016-05-04 16:56:43 -04001642 /* The valid test of the entry must be done before
1643 * reading any further.
1644 */
Michael Chanb67daab2016-05-15 03:04:51 -04001645 dma_rmb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001646 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1647 tx_pkts++;
1648 /* return full budget so NAPI will complete. */
1649 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1650 rx_pkts = budget;
1651 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1652 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1653 if (likely(rc >= 0))
1654 rx_pkts += rc;
1655 else if (rc == -EBUSY) /* partial completion */
1656 break;
1657 rx_event = true;
1658 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1659 CMPL_BASE_TYPE_HWRM_DONE) ||
1660 (TX_CMP_TYPE(txcmp) ==
1661 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1662 (TX_CMP_TYPE(txcmp) ==
1663 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1664 bnxt_hwrm_handler(bp, txcmp);
1665 }
1666 raw_cons = NEXT_RAW_CMP(raw_cons);
1667
1668 if (rx_pkts == budget)
1669 break;
1670 }
1671
1672 cpr->cp_raw_cons = raw_cons;
1673 /* ACK completion ring before freeing tx ring and producing new
1674 * buffers in rx/agg rings to prevent overflowing the completion
1675 * ring.
1676 */
1677 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1678
1679 if (tx_pkts)
1680 bnxt_tx_int(bp, bnapi, tx_pkts);
1681
1682 if (rx_event) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001683 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chanc0c050c2015-10-22 16:01:17 -04001684
1685 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1686 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1687 if (agg_event) {
1688 writel(DB_KEY_RX | rxr->rx_agg_prod,
1689 rxr->rx_agg_doorbell);
1690 writel(DB_KEY_RX | rxr->rx_agg_prod,
1691 rxr->rx_agg_doorbell);
1692 }
1693 }
1694 return rx_pkts;
1695}
1696
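/* NAPI poll handler for the special Nitro A0 ring.  A CRC error is
 * forced into every RX completion seen here so that bnxt_rx_pkt()
 * recycles the buffer instead of building an skb.
 */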
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04001697static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1698{
1699 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1700 struct bnxt *bp = bnapi->bp;
1701 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1702 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1703 struct tx_cmp *txcmp;
1704 struct rx_cmp_ext *rxcmp1;
1705 u32 cp_cons, tmp_raw_cons;
1706 u32 raw_cons = cpr->cp_raw_cons;
1707 u32 rx_pkts = 0;
1708 bool agg_event = false;
1709
1710 while (1) {
1711 int rc;
1712
1713 cp_cons = RING_CMP(raw_cons);
1714 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1715
1716 if (!TX_CMP_VALID(txcmp, raw_cons))
1717 break;
1718
1719 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1720 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1721 cp_cons = RING_CMP(tmp_raw_cons);
1722 rxcmp1 = (struct rx_cmp_ext *)
1723 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1724
1725 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1726 break;
1727
1728 /* force an error to recycle the buffer */
1729 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1730 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1731
1732 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1733 if (likely(rc == -EIO))
1734 rx_pkts++;
1735 else if (rc == -EBUSY) /* partial completion */
1736 break;
1737 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1738 CMPL_BASE_TYPE_HWRM_DONE)) {
1739 bnxt_hwrm_handler(bp, txcmp);
1740 } else {
1741 netdev_err(bp->dev,
1742 "Invalid completion received on special ring\n");
1743 }
1744 raw_cons = NEXT_RAW_CMP(raw_cons);
1745
1746 if (rx_pkts == budget)
1747 break;
1748 }
1749
1750 cpr->cp_raw_cons = raw_cons;
1751 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1752 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1753 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1754
1755 if (agg_event) {
1756 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1757 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1758 }
1759
1760 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1761 napi_complete(napi);
1762 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1763 }
1764 return rx_pkts;
1765}
1766
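/* Standard NAPI poll callback: keep calling bnxt_poll_work() until the
 * budget is exhausted or no work remains, then complete NAPI and re-arm
 * the completion ring doorbell.
 */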
Michael Chanc0c050c2015-10-22 16:01:17 -04001767static int bnxt_poll(struct napi_struct *napi, int budget)
1768{
1769 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1770 struct bnxt *bp = bnapi->bp;
1771 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1772 int work_done = 0;
1773
Michael Chanc0c050c2015-10-22 16:01:17 -04001774 while (1) {
1775 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1776
1777 if (work_done >= budget)
1778 break;
1779
1780 if (!bnxt_has_work(bp, cpr)) {
1781 napi_complete(napi);
1782 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1783 break;
1784 }
1785 }
1786 mmiowb();
Michael Chanc0c050c2015-10-22 16:01:17 -04001787 return work_done;
1788}
1789
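/* Unmap and free any skbs still queued on the TX rings (push buffers,
 * head data and fragments) and reset the corresponding netdev TX queues.
 */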
Michael Chanc0c050c2015-10-22 16:01:17 -04001790static void bnxt_free_tx_skbs(struct bnxt *bp)
1791{
1792 int i, max_idx;
1793 struct pci_dev *pdev = bp->pdev;
1794
Michael Chanb6ab4b02016-01-02 23:44:59 -05001795 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001796 return;
1797
1798 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1799 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001800 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001801 int j;
1802
Michael Chanc0c050c2015-10-22 16:01:17 -04001803 for (j = 0; j < max_idx;) {
1804 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1805 struct sk_buff *skb = tx_buf->skb;
1806 int k, last;
1807
1808 if (!skb) {
1809 j++;
1810 continue;
1811 }
1812
1813 tx_buf->skb = NULL;
1814
1815 if (tx_buf->is_push) {
1816 dev_kfree_skb(skb);
1817 j += 2;
1818 continue;
1819 }
1820
1821 dma_unmap_single(&pdev->dev,
1822 dma_unmap_addr(tx_buf, mapping),
1823 skb_headlen(skb),
1824 PCI_DMA_TODEVICE);
1825
1826 last = tx_buf->nr_frags;
1827 j += 2;
Michael Chand612a572016-01-28 03:11:22 -05001828 for (k = 0; k < last; k++, j++) {
1829 int ring_idx = j & bp->tx_ring_mask;
Michael Chanc0c050c2015-10-22 16:01:17 -04001830 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1831
Michael Chand612a572016-01-28 03:11:22 -05001832 tx_buf = &txr->tx_buf_ring[ring_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04001833 dma_unmap_page(
1834 &pdev->dev,
1835 dma_unmap_addr(tx_buf, mapping),
1836 skb_frag_size(frag), PCI_DMA_TODEVICE);
1837 }
1838 dev_kfree_skb(skb);
1839 }
1840 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1841 }
1842}
1843
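/* Free all RX-side buffers: TPA (LRO/GRO) buffers, regular RX data
 * buffers, aggregation ring pages and any partially used rx_page.
 */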
1844static void bnxt_free_rx_skbs(struct bnxt *bp)
1845{
1846 int i, max_idx, max_agg_idx;
1847 struct pci_dev *pdev = bp->pdev;
1848
Michael Chanb6ab4b02016-01-02 23:44:59 -05001849 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001850 return;
1851
1852 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1853 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1854 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001855 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001856 int j;
1857
Michael Chanc0c050c2015-10-22 16:01:17 -04001858 if (rxr->rx_tpa) {
1859 for (j = 0; j < MAX_TPA; j++) {
1860 struct bnxt_tpa_info *tpa_info =
1861 &rxr->rx_tpa[j];
1862 u8 *data = tpa_info->data;
1863
1864 if (!data)
1865 continue;
1866
1867 dma_unmap_single(
1868 &pdev->dev,
1869 dma_unmap_addr(tpa_info, mapping),
1870 bp->rx_buf_use_size,
1871 PCI_DMA_FROMDEVICE);
1872
1873 tpa_info->data = NULL;
1874
1875 kfree(data);
1876 }
1877 }
1878
1879 for (j = 0; j < max_idx; j++) {
1880 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1881 u8 *data = rx_buf->data;
1882
1883 if (!data)
1884 continue;
1885
1886 dma_unmap_single(&pdev->dev,
1887 dma_unmap_addr(rx_buf, mapping),
1888 bp->rx_buf_use_size,
1889 PCI_DMA_FROMDEVICE);
1890
1891 rx_buf->data = NULL;
1892
1893 kfree(data);
1894 }
1895
1896 for (j = 0; j < max_agg_idx; j++) {
1897 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1898 &rxr->rx_agg_ring[j];
1899 struct page *page = rx_agg_buf->page;
1900
1901 if (!page)
1902 continue;
1903
1904 dma_unmap_page(&pdev->dev,
1905 dma_unmap_addr(rx_agg_buf, mapping),
Michael Chan2839f282016-04-25 02:30:50 -04001906 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
Michael Chanc0c050c2015-10-22 16:01:17 -04001907
1908 rx_agg_buf->page = NULL;
1909 __clear_bit(j, rxr->rx_agg_bmap);
1910
1911 __free_page(page);
1912 }
Michael Chan89d0a062016-04-25 02:30:51 -04001913 if (rxr->rx_page) {
1914 __free_page(rxr->rx_page);
1915 rxr->rx_page = NULL;
1916 }
Michael Chanc0c050c2015-10-22 16:01:17 -04001917 }
1918}
1919
1920static void bnxt_free_skbs(struct bnxt *bp)
1921{
1922 bnxt_free_tx_skbs(bp);
1923 bnxt_free_rx_skbs(bp);
1924}
1925
1926static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1927{
1928 struct pci_dev *pdev = bp->pdev;
1929 int i;
1930
1931 for (i = 0; i < ring->nr_pages; i++) {
1932 if (!ring->pg_arr[i])
1933 continue;
1934
1935 dma_free_coherent(&pdev->dev, ring->page_size,
1936 ring->pg_arr[i], ring->dma_arr[i]);
1937
1938 ring->pg_arr[i] = NULL;
1939 }
1940 if (ring->pg_tbl) {
1941 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1942 ring->pg_tbl, ring->pg_tbl_map);
1943 ring->pg_tbl = NULL;
1944 }
1945 if (ring->vmem_size && *ring->vmem) {
1946 vfree(*ring->vmem);
1947 *ring->vmem = NULL;
1948 }
1949}
1950
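/* Allocate the DMA descriptor pages for a ring, the indirection page
 * table when the ring spans more than one page, and the software
 * (vmem) ring state if the ring uses one.
 */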
1951static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1952{
1953 int i;
1954 struct pci_dev *pdev = bp->pdev;
1955
1956 if (ring->nr_pages > 1) {
1957 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1958 ring->nr_pages * 8,
1959 &ring->pg_tbl_map,
1960 GFP_KERNEL);
1961 if (!ring->pg_tbl)
1962 return -ENOMEM;
1963 }
1964
1965 for (i = 0; i < ring->nr_pages; i++) {
1966 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1967 ring->page_size,
1968 &ring->dma_arr[i],
1969 GFP_KERNEL);
1970 if (!ring->pg_arr[i])
1971 return -ENOMEM;
1972
1973 if (ring->nr_pages > 1)
1974 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1975 }
1976
1977 if (ring->vmem_size) {
1978 *ring->vmem = vzalloc(ring->vmem_size);
1979 if (!(*ring->vmem))
1980 return -ENOMEM;
1981 }
1982 return 0;
1983}
1984
1985static void bnxt_free_rx_rings(struct bnxt *bp)
1986{
1987 int i;
1988
Michael Chanb6ab4b02016-01-02 23:44:59 -05001989 if (!bp->rx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04001990 return;
1991
1992 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05001993 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04001994 struct bnxt_ring_struct *ring;
1995
Michael Chanc0c050c2015-10-22 16:01:17 -04001996 kfree(rxr->rx_tpa);
1997 rxr->rx_tpa = NULL;
1998
1999 kfree(rxr->rx_agg_bmap);
2000 rxr->rx_agg_bmap = NULL;
2001
2002 ring = &rxr->rx_ring_struct;
2003 bnxt_free_ring(bp, ring);
2004
2005 ring = &rxr->rx_agg_ring_struct;
2006 bnxt_free_ring(bp, ring);
2007 }
2008}
2009
2010static int bnxt_alloc_rx_rings(struct bnxt *bp)
2011{
2012 int i, rc, agg_rings = 0, tpa_rings = 0;
2013
Michael Chanb6ab4b02016-01-02 23:44:59 -05002014 if (!bp->rx_ring)
2015 return -ENOMEM;
2016
Michael Chanc0c050c2015-10-22 16:01:17 -04002017 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2018 agg_rings = 1;
2019
2020 if (bp->flags & BNXT_FLAG_TPA)
2021 tpa_rings = 1;
2022
2023 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002024 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002025 struct bnxt_ring_struct *ring;
2026
Michael Chanc0c050c2015-10-22 16:01:17 -04002027 ring = &rxr->rx_ring_struct;
2028
2029 rc = bnxt_alloc_ring(bp, ring);
2030 if (rc)
2031 return rc;
2032
2033 if (agg_rings) {
2034 u16 mem_size;
2035
2036 ring = &rxr->rx_agg_ring_struct;
2037 rc = bnxt_alloc_ring(bp, ring);
2038 if (rc)
2039 return rc;
2040
2041 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2042 mem_size = rxr->rx_agg_bmap_size / 8;
2043 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2044 if (!rxr->rx_agg_bmap)
2045 return -ENOMEM;
2046
2047 if (tpa_rings) {
2048 rxr->rx_tpa = kcalloc(MAX_TPA,
2049 sizeof(struct bnxt_tpa_info),
2050 GFP_KERNEL);
2051 if (!rxr->rx_tpa)
2052 return -ENOMEM;
2053 }
2054 }
2055 }
2056 return 0;
2057}
2058
2059static void bnxt_free_tx_rings(struct bnxt *bp)
2060{
2061 int i;
2062 struct pci_dev *pdev = bp->pdev;
2063
Michael Chanb6ab4b02016-01-02 23:44:59 -05002064 if (!bp->tx_ring)
Michael Chanc0c050c2015-10-22 16:01:17 -04002065 return;
2066
2067 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002068 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002069 struct bnxt_ring_struct *ring;
2070
Michael Chanc0c050c2015-10-22 16:01:17 -04002071 if (txr->tx_push) {
2072 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2073 txr->tx_push, txr->tx_push_mapping);
2074 txr->tx_push = NULL;
2075 }
2076
2077 ring = &txr->tx_ring_struct;
2078
2079 bnxt_free_ring(bp, ring);
2080 }
2081}
2082
2083static int bnxt_alloc_tx_rings(struct bnxt *bp)
2084{
2085 int i, j, rc;
2086 struct pci_dev *pdev = bp->pdev;
2087
2088 bp->tx_push_size = 0;
2089 if (bp->tx_push_thresh) {
2090 int push_size;
2091
2092 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2093 bp->tx_push_thresh);
2094
Michael Chan4419dbe2016-02-10 17:33:49 -05002095 if (push_size > 256) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002096 push_size = 0;
2097 bp->tx_push_thresh = 0;
2098 }
2099
2100 bp->tx_push_size = push_size;
2101 }
2102
2103 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002104 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002105 struct bnxt_ring_struct *ring;
2106
Michael Chanc0c050c2015-10-22 16:01:17 -04002107 ring = &txr->tx_ring_struct;
2108
2109 rc = bnxt_alloc_ring(bp, ring);
2110 if (rc)
2111 return rc;
2112
2113 if (bp->tx_push_size) {
Michael Chanc0c050c2015-10-22 16:01:17 -04002114 dma_addr_t mapping;
2115
 2116 /* One pre-allocated DMA buffer to back up
 2117 * the TX push operation
2118 */
2119 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2120 bp->tx_push_size,
2121 &txr->tx_push_mapping,
2122 GFP_KERNEL);
2123
2124 if (!txr->tx_push)
2125 return -ENOMEM;
2126
Michael Chanc0c050c2015-10-22 16:01:17 -04002127 mapping = txr->tx_push_mapping +
2128 sizeof(struct tx_push_bd);
Michael Chan4419dbe2016-02-10 17:33:49 -05002129 txr->data_mapping = cpu_to_le64(mapping);
Michael Chanc0c050c2015-10-22 16:01:17 -04002130
Michael Chan4419dbe2016-02-10 17:33:49 -05002131 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
Michael Chanc0c050c2015-10-22 16:01:17 -04002132 }
2133 ring->queue_id = bp->q_info[j].queue_id;
2134 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2135 j++;
2136 }
2137 return 0;
2138}
2139
2140static void bnxt_free_cp_rings(struct bnxt *bp)
2141{
2142 int i;
2143
2144 if (!bp->bnapi)
2145 return;
2146
2147 for (i = 0; i < bp->cp_nr_rings; i++) {
2148 struct bnxt_napi *bnapi = bp->bnapi[i];
2149 struct bnxt_cp_ring_info *cpr;
2150 struct bnxt_ring_struct *ring;
2151
2152 if (!bnapi)
2153 continue;
2154
2155 cpr = &bnapi->cp_ring;
2156 ring = &cpr->cp_ring_struct;
2157
2158 bnxt_free_ring(bp, ring);
2159 }
2160}
2161
2162static int bnxt_alloc_cp_rings(struct bnxt *bp)
2163{
2164 int i, rc;
2165
2166 for (i = 0; i < bp->cp_nr_rings; i++) {
2167 struct bnxt_napi *bnapi = bp->bnapi[i];
2168 struct bnxt_cp_ring_info *cpr;
2169 struct bnxt_ring_struct *ring;
2170
2171 if (!bnapi)
2172 continue;
2173
2174 cpr = &bnapi->cp_ring;
2175 ring = &cpr->cp_ring_struct;
2176
2177 rc = bnxt_alloc_ring(bp, ring);
2178 if (rc)
2179 return rc;
2180 }
2181 return 0;
2182}
2183
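/* Point the generic ring structs of each completion, RX, RX agg and TX
 * ring at their descriptor arrays, DMA mappings and software ring
 * storage so that bnxt_alloc_ring() and bnxt_free_ring() can handle
 * them uniformly.
 */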
2184static void bnxt_init_ring_struct(struct bnxt *bp)
2185{
2186 int i;
2187
2188 for (i = 0; i < bp->cp_nr_rings; i++) {
2189 struct bnxt_napi *bnapi = bp->bnapi[i];
2190 struct bnxt_cp_ring_info *cpr;
2191 struct bnxt_rx_ring_info *rxr;
2192 struct bnxt_tx_ring_info *txr;
2193 struct bnxt_ring_struct *ring;
2194
2195 if (!bnapi)
2196 continue;
2197
2198 cpr = &bnapi->cp_ring;
2199 ring = &cpr->cp_ring_struct;
2200 ring->nr_pages = bp->cp_nr_pages;
2201 ring->page_size = HW_CMPD_RING_SIZE;
2202 ring->pg_arr = (void **)cpr->cp_desc_ring;
2203 ring->dma_arr = cpr->cp_desc_mapping;
2204 ring->vmem_size = 0;
2205
Michael Chanb6ab4b02016-01-02 23:44:59 -05002206 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002207 if (!rxr)
2208 goto skip_rx;
2209
Michael Chanc0c050c2015-10-22 16:01:17 -04002210 ring = &rxr->rx_ring_struct;
2211 ring->nr_pages = bp->rx_nr_pages;
2212 ring->page_size = HW_RXBD_RING_SIZE;
2213 ring->pg_arr = (void **)rxr->rx_desc_ring;
2214 ring->dma_arr = rxr->rx_desc_mapping;
2215 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2216 ring->vmem = (void **)&rxr->rx_buf_ring;
2217
2218 ring = &rxr->rx_agg_ring_struct;
2219 ring->nr_pages = bp->rx_agg_nr_pages;
2220 ring->page_size = HW_RXBD_RING_SIZE;
2221 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2222 ring->dma_arr = rxr->rx_agg_desc_mapping;
2223 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2224 ring->vmem = (void **)&rxr->rx_agg_ring;
2225
Michael Chan3b2b7d92016-01-02 23:45:00 -05002226skip_rx:
Michael Chanb6ab4b02016-01-02 23:44:59 -05002227 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002228 if (!txr)
2229 continue;
2230
Michael Chanc0c050c2015-10-22 16:01:17 -04002231 ring = &txr->tx_ring_struct;
2232 ring->nr_pages = bp->tx_nr_pages;
2233 ring->page_size = HW_RXBD_RING_SIZE;
2234 ring->pg_arr = (void **)txr->tx_desc_ring;
2235 ring->dma_arr = txr->tx_desc_mapping;
2236 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2237 ring->vmem = (void **)&txr->tx_buf_ring;
2238 }
2239}
2240
2241static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2242{
2243 int i;
2244 u32 prod;
2245 struct rx_bd **rx_buf_ring;
2246
2247 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2248 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2249 int j;
2250 struct rx_bd *rxbd;
2251
2252 rxbd = rx_buf_ring[i];
2253 if (!rxbd)
2254 continue;
2255
2256 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2257 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2258 rxbd->rx_bd_opaque = prod;
2259 }
2260 }
2261}
2262
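/* Initialize one RX ring: format the BDs, post rx_ring_size data
 * buffers (and rx_agg_ring_size aggregation pages when enabled), and
 * pre-allocate the TPA buffers if LRO/GRO is in use.
 */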
2263static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2264{
2265 struct net_device *dev = bp->dev;
Michael Chanc0c050c2015-10-22 16:01:17 -04002266 struct bnxt_rx_ring_info *rxr;
2267 struct bnxt_ring_struct *ring;
2268 u32 prod, type;
2269 int i;
2270
Michael Chanc0c050c2015-10-22 16:01:17 -04002271 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2272 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2273
2274 if (NET_IP_ALIGN == 2)
2275 type |= RX_BD_FLAGS_SOP;
2276
Michael Chanb6ab4b02016-01-02 23:44:59 -05002277 rxr = &bp->rx_ring[ring_nr];
Michael Chanc0c050c2015-10-22 16:01:17 -04002278 ring = &rxr->rx_ring_struct;
2279 bnxt_init_rxbd_pages(ring, type);
2280
2281 prod = rxr->rx_prod;
2282 for (i = 0; i < bp->rx_ring_size; i++) {
2283 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2284 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2285 ring_nr, i, bp->rx_ring_size);
2286 break;
2287 }
2288 prod = NEXT_RX(prod);
2289 }
2290 rxr->rx_prod = prod;
2291 ring->fw_ring_id = INVALID_HW_RING_ID;
2292
Michael Chanedd0c2c2015-12-27 18:19:19 -05002293 ring = &rxr->rx_agg_ring_struct;
2294 ring->fw_ring_id = INVALID_HW_RING_ID;
2295
Michael Chanc0c050c2015-10-22 16:01:17 -04002296 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2297 return 0;
2298
Michael Chan2839f282016-04-25 02:30:50 -04002299 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
Michael Chanc0c050c2015-10-22 16:01:17 -04002300 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2301
2302 bnxt_init_rxbd_pages(ring, type);
2303
2304 prod = rxr->rx_agg_prod;
2305 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2306 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2307 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 2308 ring_nr, i, bp->rx_agg_ring_size);
2309 break;
2310 }
2311 prod = NEXT_RX_AGG(prod);
2312 }
2313 rxr->rx_agg_prod = prod;
Michael Chanc0c050c2015-10-22 16:01:17 -04002314
2315 if (bp->flags & BNXT_FLAG_TPA) {
2316 if (rxr->rx_tpa) {
2317 u8 *data;
2318 dma_addr_t mapping;
2319
2320 for (i = 0; i < MAX_TPA; i++) {
2321 data = __bnxt_alloc_rx_data(bp, &mapping,
2322 GFP_KERNEL);
2323 if (!data)
2324 return -ENOMEM;
2325
2326 rxr->rx_tpa[i].data = data;
2327 rxr->rx_tpa[i].mapping = mapping;
2328 }
2329 } else {
2330 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2331 return -ENOMEM;
2332 }
2333 }
2334
2335 return 0;
2336}
2337
2338static int bnxt_init_rx_rings(struct bnxt *bp)
2339{
2340 int i, rc = 0;
2341
2342 for (i = 0; i < bp->rx_nr_rings; i++) {
2343 rc = bnxt_init_one_rx_ring(bp, i);
2344 if (rc)
2345 break;
2346 }
2347
2348 return rc;
2349}
2350
2351static int bnxt_init_tx_rings(struct bnxt *bp)
2352{
2353 u16 i;
2354
2355 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2356 MAX_SKB_FRAGS + 1);
2357
2358 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05002359 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04002360 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2361
2362 ring->fw_ring_id = INVALID_HW_RING_ID;
2363 }
2364
2365 return 0;
2366}
2367
2368static void bnxt_free_ring_grps(struct bnxt *bp)
2369{
2370 kfree(bp->grp_info);
2371 bp->grp_info = NULL;
2372}
2373
2374static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2375{
2376 int i;
2377
2378 if (irq_re_init) {
2379 bp->grp_info = kcalloc(bp->cp_nr_rings,
2380 sizeof(struct bnxt_ring_grp_info),
2381 GFP_KERNEL);
2382 if (!bp->grp_info)
2383 return -ENOMEM;
2384 }
2385 for (i = 0; i < bp->cp_nr_rings; i++) {
2386 if (irq_re_init)
2387 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2388 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2389 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2390 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2391 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2392 }
2393 return 0;
2394}
2395
2396static void bnxt_free_vnics(struct bnxt *bp)
2397{
2398 kfree(bp->vnic_info);
2399 bp->vnic_info = NULL;
2400 bp->nr_vnics = 0;
2401}
2402
2403static int bnxt_alloc_vnics(struct bnxt *bp)
2404{
2405 int num_vnics = 1;
2406
2407#ifdef CONFIG_RFS_ACCEL
2408 if (bp->flags & BNXT_FLAG_RFS)
2409 num_vnics += bp->rx_nr_rings;
2410#endif
2411
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04002412 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2413 num_vnics++;
2414
Michael Chanc0c050c2015-10-22 16:01:17 -04002415 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2416 GFP_KERNEL);
2417 if (!bp->vnic_info)
2418 return -ENOMEM;
2419
2420 bp->nr_vnics = num_vnics;
2421 return 0;
2422}
2423
2424static void bnxt_init_vnics(struct bnxt *bp)
2425{
2426 int i;
2427
2428 for (i = 0; i < bp->nr_vnics; i++) {
2429 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2430
2431 vnic->fw_vnic_id = INVALID_HW_RING_ID;
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04002432 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2433 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04002434 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2435
2436 if (bp->vnic_info[i].rss_hash_key) {
2437 if (i == 0)
2438 prandom_bytes(vnic->rss_hash_key,
2439 HW_HASH_KEY_SIZE);
2440 else
2441 memcpy(vnic->rss_hash_key,
2442 bp->vnic_info[0].rss_hash_key,
2443 HW_HASH_KEY_SIZE);
2444 }
2445 }
2446}
2447
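/* Return the number of descriptor pages needed to hold ring_size
 * entries, rounded up to a power of two.
 */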
2448static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2449{
2450 int pages;
2451
2452 pages = ring_size / desc_per_pg;
2453
2454 if (!pages)
2455 return 1;
2456
2457 pages++;
2458
2459 while (pages & (pages - 1))
2460 pages++;
2461
2462 return pages;
2463}
2464
2465static void bnxt_set_tpa_flags(struct bnxt *bp)
2466{
2467 bp->flags &= ~BNXT_FLAG_TPA;
2468 if (bp->dev->features & NETIF_F_LRO)
2469 bp->flags |= BNXT_FLAG_LRO;
Michael Chan94758f82016-06-13 02:25:35 -04002470 if (bp->dev->features & NETIF_F_GRO)
Michael Chanc0c050c2015-10-22 16:01:17 -04002471 bp->flags |= BNXT_FLAG_GRO;
2472}
2473
2474/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2475 * be set on entry.
2476 */
2477void bnxt_set_ring_params(struct bnxt *bp)
2478{
2479 u32 ring_size, rx_size, rx_space;
2480 u32 agg_factor = 0, agg_ring_size = 0;
2481
2482 /* 8 for CRC and VLAN */
2483 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2484
2485 rx_space = rx_size + NET_SKB_PAD +
2486 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2487
2488 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2489 ring_size = bp->rx_ring_size;
2490 bp->rx_agg_ring_size = 0;
2491 bp->rx_agg_nr_pages = 0;
2492
2493 if (bp->flags & BNXT_FLAG_TPA)
Michael Chan2839f282016-04-25 02:30:50 -04002494 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
Michael Chanc0c050c2015-10-22 16:01:17 -04002495
2496 bp->flags &= ~BNXT_FLAG_JUMBO;
2497 if (rx_space > PAGE_SIZE) {
2498 u32 jumbo_factor;
2499
2500 bp->flags |= BNXT_FLAG_JUMBO;
2501 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2502 if (jumbo_factor > agg_factor)
2503 agg_factor = jumbo_factor;
2504 }
2505 agg_ring_size = ring_size * agg_factor;
2506
2507 if (agg_ring_size) {
2508 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2509 RX_DESC_CNT);
2510 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2511 u32 tmp = agg_ring_size;
2512
2513 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2514 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2515 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2516 tmp, agg_ring_size);
2517 }
2518 bp->rx_agg_ring_size = agg_ring_size;
2519 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2520 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2521 rx_space = rx_size + NET_SKB_PAD +
2522 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2523 }
2524
2525 bp->rx_buf_use_size = rx_size;
2526 bp->rx_buf_size = rx_space;
2527
2528 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2529 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2530
2531 ring_size = bp->tx_ring_size;
2532 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2533 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2534
2535 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2536 bp->cp_ring_size = ring_size;
2537
2538 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2539 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2540 bp->cp_nr_pages = MAX_CP_PAGES;
2541 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2542 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2543 ring_size, bp->cp_ring_size);
2544 }
2545 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2546 bp->cp_ring_mask = bp->cp_bit - 1;
2547}
2548
2549static void bnxt_free_vnic_attributes(struct bnxt *bp)
2550{
2551 int i;
2552 struct bnxt_vnic_info *vnic;
2553 struct pci_dev *pdev = bp->pdev;
2554
2555 if (!bp->vnic_info)
2556 return;
2557
2558 for (i = 0; i < bp->nr_vnics; i++) {
2559 vnic = &bp->vnic_info[i];
2560
2561 kfree(vnic->fw_grp_ids);
2562 vnic->fw_grp_ids = NULL;
2563
2564 kfree(vnic->uc_list);
2565 vnic->uc_list = NULL;
2566
2567 if (vnic->mc_list) {
2568 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2569 vnic->mc_list, vnic->mc_list_mapping);
2570 vnic->mc_list = NULL;
2571 }
2572
2573 if (vnic->rss_table) {
2574 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2575 vnic->rss_table,
2576 vnic->rss_table_dma_addr);
2577 vnic->rss_table = NULL;
2578 }
2579
2580 vnic->rss_hash_key = NULL;
2581 vnic->flags = 0;
2582 }
2583}
2584
2585static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2586{
2587 int i, rc = 0, size;
2588 struct bnxt_vnic_info *vnic;
2589 struct pci_dev *pdev = bp->pdev;
2590 int max_rings;
2591
2592 for (i = 0; i < bp->nr_vnics; i++) {
2593 vnic = &bp->vnic_info[i];
2594
2595 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2596 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2597
2598 if (mem_size > 0) {
2599 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2600 if (!vnic->uc_list) {
2601 rc = -ENOMEM;
2602 goto out;
2603 }
2604 }
2605 }
2606
2607 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2608 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2609 vnic->mc_list =
2610 dma_alloc_coherent(&pdev->dev,
2611 vnic->mc_list_size,
2612 &vnic->mc_list_mapping,
2613 GFP_KERNEL);
2614 if (!vnic->mc_list) {
2615 rc = -ENOMEM;
2616 goto out;
2617 }
2618 }
2619
2620 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2621 max_rings = bp->rx_nr_rings;
2622 else
2623 max_rings = 1;
2624
2625 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2626 if (!vnic->fw_grp_ids) {
2627 rc = -ENOMEM;
2628 goto out;
2629 }
2630
2631 /* Allocate rss table and hash key */
2632 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2633 &vnic->rss_table_dma_addr,
2634 GFP_KERNEL);
2635 if (!vnic->rss_table) {
2636 rc = -ENOMEM;
2637 goto out;
2638 }
2639
2640 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2641
2642 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2643 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2644 }
2645 return 0;
2646
2647out:
2648 return rc;
2649}
2650
2651static void bnxt_free_hwrm_resources(struct bnxt *bp)
2652{
2653 struct pci_dev *pdev = bp->pdev;
2654
2655 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2656 bp->hwrm_cmd_resp_dma_addr);
2657
2658 bp->hwrm_cmd_resp_addr = NULL;
2659 if (bp->hwrm_dbg_resp_addr) {
2660 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2661 bp->hwrm_dbg_resp_addr,
2662 bp->hwrm_dbg_resp_dma_addr);
2663
2664 bp->hwrm_dbg_resp_addr = NULL;
2665 }
2666}
2667
2668static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2669{
2670 struct pci_dev *pdev = bp->pdev;
2671
2672 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2673 &bp->hwrm_cmd_resp_dma_addr,
2674 GFP_KERNEL);
2675 if (!bp->hwrm_cmd_resp_addr)
2676 return -ENOMEM;
2677 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2678 HWRM_DBG_REG_BUF_SIZE,
2679 &bp->hwrm_dbg_resp_dma_addr,
2680 GFP_KERNEL);
2681 if (!bp->hwrm_dbg_resp_addr)
2682 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2683
2684 return 0;
2685}
2686
2687static void bnxt_free_stats(struct bnxt *bp)
2688{
2689 u32 size, i;
2690 struct pci_dev *pdev = bp->pdev;
2691
Michael Chan3bdf56c2016-03-07 15:38:45 -05002692 if (bp->hw_rx_port_stats) {
2693 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2694 bp->hw_rx_port_stats,
2695 bp->hw_rx_port_stats_map);
2696 bp->hw_rx_port_stats = NULL;
2697 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2698 }
2699
Michael Chanc0c050c2015-10-22 16:01:17 -04002700 if (!bp->bnapi)
2701 return;
2702
2703 size = sizeof(struct ctx_hw_stats);
2704
2705 for (i = 0; i < bp->cp_nr_rings; i++) {
2706 struct bnxt_napi *bnapi = bp->bnapi[i];
2707 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2708
2709 if (cpr->hw_stats) {
2710 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2711 cpr->hw_stats_map);
2712 cpr->hw_stats = NULL;
2713 }
2714 }
2715}
2716
2717static int bnxt_alloc_stats(struct bnxt *bp)
2718{
2719 u32 size, i;
2720 struct pci_dev *pdev = bp->pdev;
2721
2722 size = sizeof(struct ctx_hw_stats);
2723
2724 for (i = 0; i < bp->cp_nr_rings; i++) {
2725 struct bnxt_napi *bnapi = bp->bnapi[i];
2726 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2727
2728 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2729 &cpr->hw_stats_map,
2730 GFP_KERNEL);
2731 if (!cpr->hw_stats)
2732 return -ENOMEM;
2733
2734 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2735 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05002736
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04002737 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05002738 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2739 sizeof(struct tx_port_stats) + 1024;
2740
2741 bp->hw_rx_port_stats =
2742 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2743 &bp->hw_rx_port_stats_map,
2744 GFP_KERNEL);
2745 if (!bp->hw_rx_port_stats)
2746 return -ENOMEM;
2747
2748 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2749 512;
2750 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2751 sizeof(struct rx_port_stats) + 512;
2752 bp->flags |= BNXT_FLAG_PORT_STATS;
2753 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002754 return 0;
2755}
2756
2757static void bnxt_clear_ring_indices(struct bnxt *bp)
2758{
2759 int i;
2760
2761 if (!bp->bnapi)
2762 return;
2763
2764 for (i = 0; i < bp->cp_nr_rings; i++) {
2765 struct bnxt_napi *bnapi = bp->bnapi[i];
2766 struct bnxt_cp_ring_info *cpr;
2767 struct bnxt_rx_ring_info *rxr;
2768 struct bnxt_tx_ring_info *txr;
2769
2770 if (!bnapi)
2771 continue;
2772
2773 cpr = &bnapi->cp_ring;
2774 cpr->cp_raw_cons = 0;
2775
Michael Chanb6ab4b02016-01-02 23:44:59 -05002776 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002777 if (txr) {
2778 txr->tx_prod = 0;
2779 txr->tx_cons = 0;
2780 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002781
Michael Chanb6ab4b02016-01-02 23:44:59 -05002782 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002783 if (rxr) {
2784 rxr->rx_prod = 0;
2785 rxr->rx_agg_prod = 0;
2786 rxr->rx_sw_agg_prod = 0;
Michael Chan376a5b82016-05-10 19:17:59 -04002787 rxr->rx_next_cons = 0;
Michael Chan3b2b7d92016-01-02 23:45:00 -05002788 }
Michael Chanc0c050c2015-10-22 16:01:17 -04002789 }
2790}
2791
2792static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2793{
2794#ifdef CONFIG_RFS_ACCEL
2795 int i;
2796
 2797 /* Called under rtnl_lock, and all our NAPIs have been disabled. It's
2798 * safe to delete the hash table.
2799 */
2800 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2801 struct hlist_head *head;
2802 struct hlist_node *tmp;
2803 struct bnxt_ntuple_filter *fltr;
2804
2805 head = &bp->ntp_fltr_hash_tbl[i];
2806 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2807 hlist_del(&fltr->hash);
2808 kfree(fltr);
2809 }
2810 }
2811 if (irq_reinit) {
2812 kfree(bp->ntp_fltr_bmap);
2813 bp->ntp_fltr_bmap = NULL;
2814 }
2815 bp->ntp_fltr_count = 0;
2816#endif
2817}
2818
2819static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2820{
2821#ifdef CONFIG_RFS_ACCEL
2822 int i, rc = 0;
2823
2824 if (!(bp->flags & BNXT_FLAG_RFS))
2825 return 0;
2826
2827 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2828 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2829
2830 bp->ntp_fltr_count = 0;
 2831 bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR) *
 2832 sizeof(long), GFP_KERNEL);
2833
2834 if (!bp->ntp_fltr_bmap)
2835 rc = -ENOMEM;
2836
2837 return rc;
2838#else
2839 return 0;
2840#endif
2841}
2842
2843static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2844{
2845 bnxt_free_vnic_attributes(bp);
2846 bnxt_free_tx_rings(bp);
2847 bnxt_free_rx_rings(bp);
2848 bnxt_free_cp_rings(bp);
2849 bnxt_free_ntp_fltrs(bp, irq_re_init);
2850 if (irq_re_init) {
2851 bnxt_free_stats(bp);
2852 bnxt_free_ring_grps(bp);
2853 bnxt_free_vnics(bp);
Michael Chanb6ab4b02016-01-02 23:44:59 -05002854 kfree(bp->tx_ring);
2855 bp->tx_ring = NULL;
2856 kfree(bp->rx_ring);
2857 bp->rx_ring = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04002858 kfree(bp->bnapi);
2859 bp->bnapi = NULL;
2860 } else {
2861 bnxt_clear_ring_indices(bp);
2862 }
2863}
2864
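/* Allocate all per-ring software state.  With irq_re_init, the
 * bnxt_napi structs, RX/TX ring info arrays, statistics, ntuple filter
 * table and VNIC array are allocated as well; the ring descriptor
 * memory and VNIC attributes are always allocated.
 */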
2865static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2866{
Michael Chan01657bc2016-01-02 23:45:03 -05002867 int i, j, rc, size, arr_size;
Michael Chanc0c050c2015-10-22 16:01:17 -04002868 void *bnapi;
2869
2870 if (irq_re_init) {
2871 /* Allocate bnapi mem pointer array and mem block for
2872 * all queues
2873 */
2874 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2875 bp->cp_nr_rings);
2876 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2877 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2878 if (!bnapi)
2879 return -ENOMEM;
2880
2881 bp->bnapi = bnapi;
2882 bnapi += arr_size;
2883 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2884 bp->bnapi[i] = bnapi;
2885 bp->bnapi[i]->index = i;
2886 bp->bnapi[i]->bp = bp;
2887 }
2888
Michael Chanb6ab4b02016-01-02 23:44:59 -05002889 bp->rx_ring = kcalloc(bp->rx_nr_rings,
2890 sizeof(struct bnxt_rx_ring_info),
2891 GFP_KERNEL);
2892 if (!bp->rx_ring)
2893 return -ENOMEM;
2894
2895 for (i = 0; i < bp->rx_nr_rings; i++) {
2896 bp->rx_ring[i].bnapi = bp->bnapi[i];
2897 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2898 }
2899
2900 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2901 sizeof(struct bnxt_tx_ring_info),
2902 GFP_KERNEL);
2903 if (!bp->tx_ring)
2904 return -ENOMEM;
2905
Michael Chan01657bc2016-01-02 23:45:03 -05002906 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2907 j = 0;
2908 else
2909 j = bp->rx_nr_rings;
2910
2911 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2912 bp->tx_ring[i].bnapi = bp->bnapi[j];
2913 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
Michael Chanb6ab4b02016-01-02 23:44:59 -05002914 }
2915
Michael Chanc0c050c2015-10-22 16:01:17 -04002916 rc = bnxt_alloc_stats(bp);
2917 if (rc)
2918 goto alloc_mem_err;
2919
2920 rc = bnxt_alloc_ntp_fltrs(bp);
2921 if (rc)
2922 goto alloc_mem_err;
2923
2924 rc = bnxt_alloc_vnics(bp);
2925 if (rc)
2926 goto alloc_mem_err;
2927 }
2928
2929 bnxt_init_ring_struct(bp);
2930
2931 rc = bnxt_alloc_rx_rings(bp);
2932 if (rc)
2933 goto alloc_mem_err;
2934
2935 rc = bnxt_alloc_tx_rings(bp);
2936 if (rc)
2937 goto alloc_mem_err;
2938
2939 rc = bnxt_alloc_cp_rings(bp);
2940 if (rc)
2941 goto alloc_mem_err;
2942
2943 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2944 BNXT_VNIC_UCAST_FLAG;
2945 rc = bnxt_alloc_vnic_attributes(bp);
2946 if (rc)
2947 goto alloc_mem_err;
2948 return 0;
2949
2950alloc_mem_err:
2951 bnxt_free_mem(bp, true);
2952 return rc;
2953}
2954
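/* Fill in the common HWRM request header.  A typical caller pattern in
 * this file looks like the following sketch, where HWRM_XXX and the
 * request struct stand in for a specific command (see e.g.
 * bnxt_hwrm_cfa_l2_set_rx_mask() below):
 *
 *	struct hwrm_xxx_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_XXX, -1, -1);
 *	... fill in request-specific fields ...
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */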
2955void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2956 u16 cmpl_ring, u16 target_id)
2957{
Michael Chana8643e12016-02-26 04:00:05 -05002958 struct input *req = request;
Michael Chanc0c050c2015-10-22 16:01:17 -04002959
Michael Chana8643e12016-02-26 04:00:05 -05002960 req->req_type = cpu_to_le16(req_type);
2961 req->cmpl_ring = cpu_to_le16(cmpl_ring);
2962 req->target_id = cpu_to_le16(target_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04002963 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2964}
2965
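/* Send one HWRM message: copy the request into BAR 0, ring the channel
 * doorbell, then either wait for the HWRM_DONE completion (when a
 * completion ring is specified) or poll the response buffer for a
 * non-zero length followed by the valid bit.  Callers serialize through
 * hwrm_cmd_lock; only one request can be outstanding at a time.
 */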
Michael Chanfbfbc482016-02-26 04:00:07 -05002966static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2967 int timeout, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04002968{
Michael Chana11fa2b2016-05-15 03:04:47 -04002969 int i, intr_process, rc, tmo_count;
Michael Chana8643e12016-02-26 04:00:05 -05002970 struct input *req = msg;
Michael Chanc0c050c2015-10-22 16:01:17 -04002971 u32 *data = msg;
2972 __le32 *resp_len, *valid;
2973 u16 cp_ring_id, len = 0;
2974 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2975
Michael Chana8643e12016-02-26 04:00:05 -05002976 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
Michael Chanc0c050c2015-10-22 16:01:17 -04002977 memset(resp, 0, PAGE_SIZE);
Michael Chana8643e12016-02-26 04:00:05 -05002978 cp_ring_id = le16_to_cpu(req->cmpl_ring);
Michael Chanc0c050c2015-10-22 16:01:17 -04002979 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2980
2981 /* Write request msg to hwrm channel */
2982 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2983
Michael Chane6ef2692016-03-28 19:46:05 -04002984 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
Michael Chand79979a2016-01-07 19:56:57 -05002985 writel(0, bp->bar0 + i);
2986
Michael Chanc0c050c2015-10-22 16:01:17 -04002987 /* currently supports only one outstanding message */
2988 if (intr_process)
Michael Chana8643e12016-02-26 04:00:05 -05002989 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04002990
2991 /* Ring channel doorbell */
2992 writel(1, bp->bar0 + 0x100);
2993
Michael Chanff4fe812016-02-26 04:00:04 -05002994 if (!timeout)
2995 timeout = DFLT_HWRM_CMD_TIMEOUT;
2996
Michael Chanc0c050c2015-10-22 16:01:17 -04002997 i = 0;
Michael Chana11fa2b2016-05-15 03:04:47 -04002998 tmo_count = timeout * 40;
Michael Chanc0c050c2015-10-22 16:01:17 -04002999 if (intr_process) {
3000 /* Wait until hwrm response cmpl interrupt is processed */
3001 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
Michael Chana11fa2b2016-05-15 03:04:47 -04003002 i++ < tmo_count) {
3003 usleep_range(25, 40);
Michael Chanc0c050c2015-10-22 16:01:17 -04003004 }
3005
3006 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3007 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
Michael Chana8643e12016-02-26 04:00:05 -05003008 le16_to_cpu(req->req_type));
Michael Chanc0c050c2015-10-22 16:01:17 -04003009 return -1;
3010 }
3011 } else {
3012 /* Check if response len is updated */
3013 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
Michael Chana11fa2b2016-05-15 03:04:47 -04003014 for (i = 0; i < tmo_count; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003015 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3016 HWRM_RESP_LEN_SFT;
3017 if (len)
3018 break;
Michael Chana11fa2b2016-05-15 03:04:47 -04003019 usleep_range(25, 40);
Michael Chanc0c050c2015-10-22 16:01:17 -04003020 }
3021
Michael Chana11fa2b2016-05-15 03:04:47 -04003022 if (i >= tmo_count) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003023 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
Michael Chana8643e12016-02-26 04:00:05 -05003024 timeout, le16_to_cpu(req->req_type),
Michael Chan8578d6c2016-05-15 03:04:48 -04003025 le16_to_cpu(req->seq_id), len);
Michael Chanc0c050c2015-10-22 16:01:17 -04003026 return -1;
3027 }
3028
3029 /* Last word of resp contains valid bit */
3030 valid = bp->hwrm_cmd_resp_addr + len - 4;
Michael Chana11fa2b2016-05-15 03:04:47 -04003031 for (i = 0; i < 5; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003032 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3033 break;
Michael Chana11fa2b2016-05-15 03:04:47 -04003034 udelay(1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003035 }
3036
Michael Chana11fa2b2016-05-15 03:04:47 -04003037 if (i >= 5) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003038 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
Michael Chana8643e12016-02-26 04:00:05 -05003039 timeout, le16_to_cpu(req->req_type),
3040 le16_to_cpu(req->seq_id), len, *valid);
Michael Chanc0c050c2015-10-22 16:01:17 -04003041 return -1;
3042 }
3043 }
3044
3045 rc = le16_to_cpu(resp->error_code);
Michael Chanfbfbc482016-02-26 04:00:07 -05003046 if (rc && !silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003047 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3048 le16_to_cpu(resp->req_type),
3049 le16_to_cpu(resp->seq_id), rc);
Michael Chanfbfbc482016-02-26 04:00:07 -05003050 return rc;
3051}
3052
3053int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3054{
3055 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04003056}
3057
3058int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3059{
3060 int rc;
3061
3062 mutex_lock(&bp->hwrm_cmd_lock);
3063 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3064 mutex_unlock(&bp->hwrm_cmd_lock);
3065 return rc;
3066}
3067
Michael Chan90e209212016-02-26 04:00:08 -05003068int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3069 int timeout)
3070{
3071 int rc;
3072
3073 mutex_lock(&bp->hwrm_cmd_lock);
3074 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3075 mutex_unlock(&bp->hwrm_cmd_lock);
3076 return rc;
3077}
3078
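/* Tell the firmware which async events to forward to the driver: the
 * events in bnxt_async_events_arr plus any extra bits passed in via the
 * bmap argument.
 */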
Michael Chana1653b12016-12-07 00:26:20 -05003079int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3080 int bmap_size)
Michael Chanc0c050c2015-10-22 16:01:17 -04003081{
3082 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25be8622016-04-05 14:09:00 -04003083 DECLARE_BITMAP(async_events_bmap, 256);
3084 u32 *events = (u32 *)async_events_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05003085 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003086
3087 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3088
3089 req.enables =
Michael Chana1653b12016-12-07 00:26:20 -05003090 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
Michael Chanc0c050c2015-10-22 16:01:17 -04003091
Michael Chan25be8622016-04-05 14:09:00 -04003092 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3093 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3094 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3095
Michael Chana1653b12016-12-07 00:26:20 -05003096 if (bmap && bmap_size) {
3097 for (i = 0; i < bmap_size; i++) {
3098 if (test_bit(i, bmap))
3099 __set_bit(i, async_events_bmap);
3100 }
3101 }
3102
Michael Chan25be8622016-04-05 14:09:00 -04003103 for (i = 0; i < 8; i++)
3104 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3105
Michael Chana1653b12016-12-07 00:26:20 -05003106 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3107}
3108
3109static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3110{
3111 struct hwrm_func_drv_rgtr_input req = {0};
3112
3113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3114
3115 req.enables =
3116 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3117 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3118
Michael Chan11f15ed2016-04-05 14:08:55 -04003119 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
Michael Chanc0c050c2015-10-22 16:01:17 -04003120 req.ver_maj = DRV_VER_MAJ;
3121 req.ver_min = DRV_VER_MIN;
3122 req.ver_upd = DRV_VER_UPD;
3123
3124 if (BNXT_PF(bp)) {
Michael Chande68f5de2015-12-09 19:35:41 -05003125 DECLARE_BITMAP(vf_req_snif_bmap, 256);
Michael Chanc0c050c2015-10-22 16:01:17 -04003126 u32 *data = (u32 *)vf_req_snif_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05003127 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003128
Michael Chande68f5de2015-12-09 19:35:41 -05003129 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
Michael Chanc0c050c2015-10-22 16:01:17 -04003130 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3131 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3132
Michael Chande68f5de2015-12-09 19:35:41 -05003133 for (i = 0; i < 8; i++)
3134 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3135
Michael Chanc0c050c2015-10-22 16:01:17 -04003136 req.enables |=
3137 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3138 }
3139
3140 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3141}
3142
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05003143static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3144{
3145 struct hwrm_func_drv_unrgtr_input req = {0};
3146
3147 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3148 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3149}
3150
Michael Chanc0c050c2015-10-22 16:01:17 -04003151static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3152{
3153 u32 rc = 0;
3154 struct hwrm_tunnel_dst_port_free_input req = {0};
3155
3156 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3157 req.tunnel_type = tunnel_type;
3158
3159 switch (tunnel_type) {
3160 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3161 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3162 break;
3163 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3164 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3165 break;
3166 default:
3167 break;
3168 }
3169
3170 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3171 if (rc)
3172 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3173 rc);
3174 return rc;
3175}
3176
3177static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3178 u8 tunnel_type)
3179{
3180 u32 rc = 0;
3181 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3182 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3183
3184 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3185
3186 req.tunnel_type = tunnel_type;
3187 req.tunnel_dst_port_val = port;
3188
3189 mutex_lock(&bp->hwrm_cmd_lock);
3190 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3191 if (rc) {
3192 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3193 rc);
3194 goto err_out;
3195 }
3196
Christophe Jaillet57aac712016-11-22 06:14:40 +01003197 switch (tunnel_type) {
3198 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
Michael Chanc0c050c2015-10-22 16:01:17 -04003199 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01003200 break;
3201 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
Michael Chanc0c050c2015-10-22 16:01:17 -04003202 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01003203 break;
3204 default:
3205 break;
3206 }
3207
Michael Chanc0c050c2015-10-22 16:01:17 -04003208err_out:
3209 mutex_unlock(&bp->hwrm_cmd_lock);
3210 return rc;
3211}
3212
3213static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3214{
3215 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3216 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3217
3218 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
Michael Chanc1935542015-12-27 18:19:28 -05003219 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003220
3221 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3222 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3223 req.mask = cpu_to_le32(vnic->rx_mask);
3224 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3225}
3226
3227#ifdef CONFIG_RFS_ACCEL
3228static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3229 struct bnxt_ntuple_filter *fltr)
3230{
3231 struct hwrm_cfa_ntuple_filter_free_input req = {0};
3232
3233 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3234 req.ntuple_filter_id = fltr->filter_id;
3235 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3236}
3237
3238#define BNXT_NTP_FLTR_FLAGS \
3239 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
3240 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
3241 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
3242 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
3243 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
3244 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
3245 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
3246 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
3247 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
3248 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
3249 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
3250 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
3251 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
Michael Chanc1935542015-12-27 18:19:28 -05003252 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04003253
3254static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3255 struct bnxt_ntuple_filter *fltr)
3256{
3257 int rc = 0;
3258 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3259 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3260 bp->hwrm_cmd_resp_addr;
3261 struct flow_keys *keys = &fltr->fkeys;
3262 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3263
3264 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
Michael Chana54c4d72016-07-25 12:33:35 -04003265 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04003266
3267 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3268
3269 req.ethertype = htons(ETH_P_IP);
3270 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
Michael Chanc1935542015-12-27 18:19:28 -05003271 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
Michael Chanc0c050c2015-10-22 16:01:17 -04003272 req.ip_protocol = keys->basic.ip_proto;
3273
3274 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3275 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3276 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3277 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3278
3279 req.src_port = keys->ports.src;
3280 req.src_port_mask = cpu_to_be16(0xffff);
3281 req.dst_port = keys->ports.dst;
3282 req.dst_port_mask = cpu_to_be16(0xffff);
3283
Michael Chanc1935542015-12-27 18:19:28 -05003284 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003285 mutex_lock(&bp->hwrm_cmd_lock);
3286 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3287 if (!rc)
3288 fltr->filter_id = resp->ntuple_filter_id;
3289 mutex_unlock(&bp->hwrm_cmd_lock);
3290 return rc;
3291}
3292#endif
3293
3294static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3295 u8 *mac_addr)
3296{
3297 u32 rc = 0;
3298 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3299 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3300
3301 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003302 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3303 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3304 req.flags |=
3305 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
Michael Chanc1935542015-12-27 18:19:28 -05003306 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003307 req.enables =
3308 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
Michael Chanc1935542015-12-27 18:19:28 -05003309 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
Michael Chanc0c050c2015-10-22 16:01:17 -04003310 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3311 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3312 req.l2_addr_mask[0] = 0xff;
3313 req.l2_addr_mask[1] = 0xff;
3314 req.l2_addr_mask[2] = 0xff;
3315 req.l2_addr_mask[3] = 0xff;
3316 req.l2_addr_mask[4] = 0xff;
3317 req.l2_addr_mask[5] = 0xff;
3318
3319 mutex_lock(&bp->hwrm_cmd_lock);
3320 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3321 if (!rc)
3322 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3323 resp->l2_filter_id;
3324 mutex_unlock(&bp->hwrm_cmd_lock);
3325 return rc;
3326}
3327
3328static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3329{
3330 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3331 int rc = 0;
3332
3333 /* Any associated ntuple filters will also be cleared by firmware. */
3334 mutex_lock(&bp->hwrm_cmd_lock);
3335 for (i = 0; i < num_of_vnics; i++) {
3336 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3337
3338 for (j = 0; j < vnic->uc_filter_count; j++) {
3339 struct hwrm_cfa_l2_filter_free_input req = {0};
3340
3341 bnxt_hwrm_cmd_hdr_init(bp, &req,
3342 HWRM_CFA_L2_FILTER_FREE, -1, -1);
3343
3344 req.l2_filter_id = vnic->fw_l2_filter_id[j];
3345
3346 rc = _hwrm_send_message(bp, &req, sizeof(req),
3347 HWRM_CMD_TIMEOUT);
3348 }
3349 vnic->uc_filter_count = 0;
3350 }
3351 mutex_unlock(&bp->hwrm_cmd_lock);
3352
3353 return rc;
3354}
3355
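/* Configure TPA (hardware LRO/GRO aggregation) for a VNIC.  max_agg_segs
 * is given to the firmware in log2 units, derived from the MTU-based
 * MSS and BNXT_RX_PAGE_SIZE.
 */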
3356static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3357{
3358 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3359 struct hwrm_vnic_tpa_cfg_input req = {0};
3360
3361 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3362
3363 if (tpa_flags) {
3364 u16 mss = bp->dev->mtu - 40;
3365 u32 nsegs, n, segs = 0, flags;
3366
3367 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3368 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3369 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3370 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3371 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3372 if (tpa_flags & BNXT_FLAG_GRO)
3373 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3374
3375 req.flags = cpu_to_le32(flags);
3376
3377 req.enables =
3378 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
Michael Chanc1935542015-12-27 18:19:28 -05003379 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3380 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04003381
3382 /* Number of segs is in log2 units, and the first packet is not
3383  * included as part of these units.
3384  */
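/* Illustrative example (figures assumed, not taken from the hardware spec):
 * with 4K pages (BNXT_RX_PAGE_SIZE = 4096), MAX_SKB_FRAGS = 17 and an MTU
 * of 1500, mss = 1460, so n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32,
 * and ilog2(32) = 5 is programmed into max_agg_segs below.
 */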
Michael Chan2839f282016-04-25 02:30:50 -04003385 if (mss <= BNXT_RX_PAGE_SIZE) {
3386 n = BNXT_RX_PAGE_SIZE / mss;
Michael Chanc0c050c2015-10-22 16:01:17 -04003387 nsegs = (MAX_SKB_FRAGS - 1) * n;
3388 } else {
Michael Chan2839f282016-04-25 02:30:50 -04003389 n = mss / BNXT_RX_PAGE_SIZE;
3390 if (mss & (BNXT_RX_PAGE_SIZE - 1))
Michael Chanc0c050c2015-10-22 16:01:17 -04003391 n++;
3392 nsegs = (MAX_SKB_FRAGS - n) / n;
3393 }
3394
3395 segs = ilog2(nsegs);
3396 req.max_agg_segs = cpu_to_le16(segs);
3397 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
Michael Chanc1935542015-12-27 18:19:28 -05003398
3399 req.min_agg_len = cpu_to_le32(512);
Michael Chanc0c050c2015-10-22 16:01:17 -04003400 }
3401 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3402
3403 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3404}
3405
3406static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3407{
3408 u32 i, j, max_rings;
3409 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3410 struct hwrm_vnic_rss_cfg_input req = {0};
3411
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003412 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04003413 return 0;
3414
3415 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3416 if (set_rss) {
Michael Chan87da7f72016-11-16 21:13:09 -05003417 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003418 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3419 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3420 max_rings = bp->rx_nr_rings - 1;
3421 else
3422 max_rings = bp->rx_nr_rings;
3423 } else {
Michael Chanc0c050c2015-10-22 16:01:17 -04003424 max_rings = 1;
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003425 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003426
3427 /* Fill the RSS indirection table with ring group ids */
3428 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3429 if (j == max_rings)
3430 j = 0;
3431 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3432 }
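/* Note: the indirection table holds HW_HASH_INDEX_SIZE entries, so with,
 * for example, max_rings = 4 the fw_grp_ids of rings 0..3 simply repeat
 * across the whole table, spreading the RSS hash space evenly over the
 * active rings.
 */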
3433
3434 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3435 req.hash_key_tbl_addr =
3436 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3437 }
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003438 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
Michael Chanc0c050c2015-10-22 16:01:17 -04003439 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3440}
3441
3442static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3443{
3444 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3445 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3446
3447 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3448 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3449 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3450 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3451 req.enables =
3452 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3453 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3454 /* thresholds not implemented in firmware yet */
3455 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3456 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3457 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3458 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3459}
3460
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003461static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3462 u16 ctx_idx)
Michael Chanc0c050c2015-10-22 16:01:17 -04003463{
3464 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3465
3466 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3467 req.rss_cos_lb_ctx_id =
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003468 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
Michael Chanc0c050c2015-10-22 16:01:17 -04003469
3470 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003471 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003472}
3473
3474static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3475{
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003476 int i, j;
Michael Chanc0c050c2015-10-22 16:01:17 -04003477
3478 for (i = 0; i < bp->nr_vnics; i++) {
3479 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3480
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003481 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3482 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3483 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3484 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003485 }
3486 bp->rsscos_nr_ctxs = 0;
3487}
3488
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003489static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
Michael Chanc0c050c2015-10-22 16:01:17 -04003490{
3491 int rc;
3492 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3493 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3494 bp->hwrm_cmd_resp_addr;
3495
3496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3497 -1);
3498
3499 mutex_lock(&bp->hwrm_cmd_lock);
3500 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3501 if (!rc)
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003502 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
Michael Chanc0c050c2015-10-22 16:01:17 -04003503 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3504 mutex_unlock(&bp->hwrm_cmd_lock);
3505
3506 return rc;
3507}
3508
Michael Chana588e452016-12-07 00:26:21 -05003509int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
Michael Chanc0c050c2015-10-22 16:01:17 -04003510{
Michael Chanb81a90d2016-01-02 23:45:01 -05003511 unsigned int ring = 0, grp_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04003512 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3513 struct hwrm_vnic_cfg_input req = {0};
Michael Chancf6645f2016-06-13 02:25:28 -04003514 u16 def_vlan = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04003515
3516 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003517
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04003518 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3519 /* Only RSS is supported for now. TBD: COS & LB */
3520 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3521 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3522 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3523 VNIC_CFG_REQ_ENABLES_MRU);
3524 } else {
3525 req.rss_rule = cpu_to_le16(0xffff);
3526 }
3527
3528 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3529 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003530 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3531 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3532 } else {
3533 req.cos_rule = cpu_to_le16(0xffff);
3534 }
3535
Michael Chanc0c050c2015-10-22 16:01:17 -04003536 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
Michael Chanb81a90d2016-01-02 23:45:01 -05003537 ring = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04003538 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
Michael Chanb81a90d2016-01-02 23:45:01 -05003539 ring = vnic_id - 1;
Prashant Sreedharan76595192016-07-18 07:15:22 -04003540 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3541 ring = bp->rx_nr_rings - 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04003542
Michael Chanb81a90d2016-01-02 23:45:01 -05003543 grp_idx = bp->rx_ring[ring].bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04003544 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3545 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3546
3547 req.lb_rule = cpu_to_le16(0xffff);
3548 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3549 VLAN_HLEN);
3550
Michael Chancf6645f2016-06-13 02:25:28 -04003551#ifdef CONFIG_BNXT_SRIOV
3552 if (BNXT_VF(bp))
3553 def_vlan = bp->vf.vlan;
3554#endif
3555 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
Michael Chanc0c050c2015-10-22 16:01:17 -04003556 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
Michael Chana588e452016-12-07 00:26:21 -05003557 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
3558 req.flags |=
3559 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
Michael Chanc0c050c2015-10-22 16:01:17 -04003560
3561 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3562}
3563
3564static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3565{
3566 u32 rc = 0;
3567
3568 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3569 struct hwrm_vnic_free_input req = {0};
3570
3571 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3572 req.vnic_id =
3573 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3574
3575 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3576 if (rc)
3577 return rc;
3578 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3579 }
3580 return rc;
3581}
3582
3583static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3584{
3585 u16 i;
3586
3587 for (i = 0; i < bp->nr_vnics; i++)
3588 bnxt_hwrm_vnic_free_one(bp, i);
3589}
3590
Michael Chanb81a90d2016-01-02 23:45:01 -05003591static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3592 unsigned int start_rx_ring_idx,
3593 unsigned int nr_rings)
Michael Chanc0c050c2015-10-22 16:01:17 -04003594{
Michael Chanb81a90d2016-01-02 23:45:01 -05003595 int rc = 0;
3596 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04003597 struct hwrm_vnic_alloc_input req = {0};
3598 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3599
3600 /* map ring groups to this vnic */
Michael Chanb81a90d2016-01-02 23:45:01 -05003601 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3602 grp_idx = bp->rx_ring[i].bnapi->index;
3603 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003604 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05003605 j, nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04003606 break;
3607 }
3608 bp->vnic_info[vnic_id].fw_grp_ids[j] =
Michael Chanb81a90d2016-01-02 23:45:01 -05003609 bp->grp_info[grp_idx].fw_grp_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003610 }
3611
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04003612 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3613 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003614 if (vnic_id == 0)
3615 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3616
3617 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3618
3619 mutex_lock(&bp->hwrm_cmd_lock);
3620 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3621 if (!rc)
3622 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3623 mutex_unlock(&bp->hwrm_cmd_lock);
3624 return rc;
3625}
3626
3627static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3628{
3629 u16 i;
3630 u32 rc = 0;
3631
3632 mutex_lock(&bp->hwrm_cmd_lock);
3633 for (i = 0; i < bp->rx_nr_rings; i++) {
3634 struct hwrm_ring_grp_alloc_input req = {0};
3635 struct hwrm_ring_grp_alloc_output *resp =
3636 bp->hwrm_cmd_resp_addr;
Michael Chanb81a90d2016-01-02 23:45:01 -05003637 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04003638
3639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3640
Michael Chanb81a90d2016-01-02 23:45:01 -05003641 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3642 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3643 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3644 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
Michael Chanc0c050c2015-10-22 16:01:17 -04003645
3646 rc = _hwrm_send_message(bp, &req, sizeof(req),
3647 HWRM_CMD_TIMEOUT);
3648 if (rc)
3649 break;
3650
Michael Chanb81a90d2016-01-02 23:45:01 -05003651 bp->grp_info[grp_idx].fw_grp_id =
3652 le32_to_cpu(resp->ring_group_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003653 }
3654 mutex_unlock(&bp->hwrm_cmd_lock);
3655 return rc;
3656}
3657
3658static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3659{
3660 u16 i;
3661 u32 rc = 0;
3662 struct hwrm_ring_grp_free_input req = {0};
3663
3664 if (!bp->grp_info)
3665 return 0;
3666
3667 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3668
3669 mutex_lock(&bp->hwrm_cmd_lock);
3670 for (i = 0; i < bp->cp_nr_rings; i++) {
3671 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3672 continue;
3673 req.ring_group_id =
3674 cpu_to_le32(bp->grp_info[i].fw_grp_id);
3675
3676 rc = _hwrm_send_message(bp, &req, sizeof(req),
3677 HWRM_CMD_TIMEOUT);
3678 if (rc)
3679 break;
3680 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3681 }
3682 mutex_unlock(&bp->hwrm_cmd_lock);
3683 return rc;
3684}
3685
3686static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3687 struct bnxt_ring_struct *ring,
3688 u32 ring_type, u32 map_index,
3689 u32 stats_ctx_id)
3690{
3691 int rc = 0, err = 0;
3692 struct hwrm_ring_alloc_input req = {0};
3693 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3694 u16 ring_id;
3695
3696 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3697
3698 req.enables = 0;
3699 if (ring->nr_pages > 1) {
3700 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3701 /* Page size is in log2 units */
3702 req.page_size = BNXT_PAGE_SHIFT;
3703 req.page_tbl_depth = 1;
3704 } else {
3705 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3706 }
3707 req.fbo = 0;
3708 /* Association of ring index with doorbell index and MSIX number */
3709 req.logical_id = cpu_to_le16(map_index);
3710
3711 switch (ring_type) {
3712 case HWRM_RING_ALLOC_TX:
3713 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3714 /* Association of transmit ring with completion ring */
3715 req.cmpl_ring_id =
3716 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3717 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3718 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3719 req.queue_id = cpu_to_le16(ring->queue_id);
3720 break;
3721 case HWRM_RING_ALLOC_RX:
3722 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3723 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3724 break;
3725 case HWRM_RING_ALLOC_AGG:
3726 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3727 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3728 break;
3729 case HWRM_RING_ALLOC_CMPL:
3730 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3731 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3732 if (bp->flags & BNXT_FLAG_USING_MSIX)
3733 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3734 break;
3735 default:
3736 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3737 ring_type);
3738 return -1;
3739 }
3740
3741 mutex_lock(&bp->hwrm_cmd_lock);
3742 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3743 err = le16_to_cpu(resp->error_code);
3744 ring_id = le16_to_cpu(resp->ring_id);
3745 mutex_unlock(&bp->hwrm_cmd_lock);
3746
3747 if (rc || err) {
3748 switch (ring_type) {
3749 case RING_FREE_REQ_RING_TYPE_CMPL:
3750 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3751 rc, err);
3752 return -1;
3753
3754 case RING_FREE_REQ_RING_TYPE_RX:
3755 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3756 rc, err);
3757 return -1;
3758
3759 case RING_FREE_REQ_RING_TYPE_TX:
3760 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3761 rc, err);
3762 return -1;
3763
3764 default:
3765 netdev_err(bp->dev, "Invalid ring\n");
3766 return -1;
3767 }
3768 }
3769 ring->fw_ring_id = ring_id;
3770 return rc;
3771}
3772
3773static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3774{
3775 int i, rc = 0;
3776
Michael Chanedd0c2c2015-12-27 18:19:19 -05003777 for (i = 0; i < bp->cp_nr_rings; i++) {
3778 struct bnxt_napi *bnapi = bp->bnapi[i];
3779 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3780 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
Michael Chanc0c050c2015-10-22 16:01:17 -04003781
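/* Each completion ring's doorbell sits at a 0x80-byte stride in the
 * doorbell BAR (bp->bar1), indexed by the completion ring number.
 */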
Prashant Sreedharan33e52d82016-03-28 19:46:04 -04003782 cpr->cp_doorbell = bp->bar1 + i * 0x80;
Michael Chanedd0c2c2015-12-27 18:19:19 -05003783 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3784 INVALID_STATS_CTX_ID);
3785 if (rc)
3786 goto err_out;
Michael Chanedd0c2c2015-12-27 18:19:19 -05003787 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3788 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003789 }
3790
Michael Chanedd0c2c2015-12-27 18:19:19 -05003791 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003792 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05003793 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003794 u32 map_idx = txr->bnapi->index;
3795 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
Michael Chanc0c050c2015-10-22 16:01:17 -04003796
Michael Chanb81a90d2016-01-02 23:45:01 -05003797 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3798 map_idx, fw_stats_ctx);
Michael Chanedd0c2c2015-12-27 18:19:19 -05003799 if (rc)
3800 goto err_out;
Michael Chanb81a90d2016-01-02 23:45:01 -05003801 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
Michael Chanc0c050c2015-10-22 16:01:17 -04003802 }
3803
Michael Chanedd0c2c2015-12-27 18:19:19 -05003804 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003805 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05003806 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003807 u32 map_idx = rxr->bnapi->index;
Michael Chanc0c050c2015-10-22 16:01:17 -04003808
Michael Chanb81a90d2016-01-02 23:45:01 -05003809 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3810 map_idx, INVALID_STATS_CTX_ID);
Michael Chanedd0c2c2015-12-27 18:19:19 -05003811 if (rc)
3812 goto err_out;
Michael Chanb81a90d2016-01-02 23:45:01 -05003813 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
Michael Chanedd0c2c2015-12-27 18:19:19 -05003814 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
Michael Chanb81a90d2016-01-02 23:45:01 -05003815 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003816 }
3817
3818 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3819 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003820 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04003821 struct bnxt_ring_struct *ring =
3822 &rxr->rx_agg_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003823 u32 grp_idx = rxr->bnapi->index;
3824 u32 map_idx = grp_idx + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04003825
3826 rc = hwrm_ring_alloc_send_msg(bp, ring,
3827 HWRM_RING_ALLOC_AGG,
Michael Chanb81a90d2016-01-02 23:45:01 -05003828 map_idx,
Michael Chanc0c050c2015-10-22 16:01:17 -04003829 INVALID_STATS_CTX_ID);
3830 if (rc)
3831 goto err_out;
3832
Michael Chanb81a90d2016-01-02 23:45:01 -05003833 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
Michael Chanc0c050c2015-10-22 16:01:17 -04003834 writel(DB_KEY_RX | rxr->rx_agg_prod,
3835 rxr->rx_agg_doorbell);
Michael Chanb81a90d2016-01-02 23:45:01 -05003836 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003837 }
3838 }
3839err_out:
3840 return rc;
3841}
3842
3843static int hwrm_ring_free_send_msg(struct bnxt *bp,
3844 struct bnxt_ring_struct *ring,
3845 u32 ring_type, int cmpl_ring_id)
3846{
3847 int rc;
3848 struct hwrm_ring_free_input req = {0};
3849 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3850 u16 error_code;
3851
Prashant Sreedharan74608fc2016-01-28 03:11:20 -05003852 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003853 req.ring_type = ring_type;
3854 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3855
3856 mutex_lock(&bp->hwrm_cmd_lock);
3857 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3858 error_code = le16_to_cpu(resp->error_code);
3859 mutex_unlock(&bp->hwrm_cmd_lock);
3860
3861 if (rc || error_code) {
3862 switch (ring_type) {
3863 case RING_FREE_REQ_RING_TYPE_CMPL:
3864 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3865 rc);
3866 return rc;
3867 case RING_FREE_REQ_RING_TYPE_RX:
3868 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3869 rc);
3870 return rc;
3871 case RING_FREE_REQ_RING_TYPE_TX:
3872 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3873 rc);
3874 return rc;
3875 default:
3876 netdev_err(bp->dev, "Invalid ring\n");
3877 return -1;
3878 }
3879 }
3880 return 0;
3881}
3882
Michael Chanedd0c2c2015-12-27 18:19:19 -05003883static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
Michael Chanc0c050c2015-10-22 16:01:17 -04003884{
Michael Chanedd0c2c2015-12-27 18:19:19 -05003885 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003886
3887 if (!bp->bnapi)
Michael Chanedd0c2c2015-12-27 18:19:19 -05003888 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04003889
Michael Chanedd0c2c2015-12-27 18:19:19 -05003890 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003891 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05003892 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003893 u32 grp_idx = txr->bnapi->index;
3894 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003895
Michael Chanedd0c2c2015-12-27 18:19:19 -05003896 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3897 hwrm_ring_free_send_msg(bp, ring,
3898 RING_FREE_REQ_RING_TYPE_TX,
3899 close_path ? cmpl_ring_id :
3900 INVALID_HW_RING_ID);
3901 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003902 }
3903 }
3904
Michael Chanedd0c2c2015-12-27 18:19:19 -05003905 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003906 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05003907 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003908 u32 grp_idx = rxr->bnapi->index;
3909 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003910
Michael Chanedd0c2c2015-12-27 18:19:19 -05003911 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3912 hwrm_ring_free_send_msg(bp, ring,
3913 RING_FREE_REQ_RING_TYPE_RX,
3914 close_path ? cmpl_ring_id :
3915 INVALID_HW_RING_ID);
3916 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanb81a90d2016-01-02 23:45:01 -05003917 bp->grp_info[grp_idx].rx_fw_ring_id =
3918 INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003919 }
3920 }
3921
Michael Chanedd0c2c2015-12-27 18:19:19 -05003922 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05003923 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
Michael Chanedd0c2c2015-12-27 18:19:19 -05003924 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
Michael Chanb81a90d2016-01-02 23:45:01 -05003925 u32 grp_idx = rxr->bnapi->index;
3926 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04003927
Michael Chanedd0c2c2015-12-27 18:19:19 -05003928 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3929 hwrm_ring_free_send_msg(bp, ring,
3930 RING_FREE_REQ_RING_TYPE_RX,
3931 close_path ? cmpl_ring_id :
3932 INVALID_HW_RING_ID);
3933 ring->fw_ring_id = INVALID_HW_RING_ID;
Michael Chanb81a90d2016-01-02 23:45:01 -05003934 bp->grp_info[grp_idx].agg_fw_ring_id =
3935 INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003936 }
3937 }
3938
Michael Chanedd0c2c2015-12-27 18:19:19 -05003939 for (i = 0; i < bp->cp_nr_rings; i++) {
3940 struct bnxt_napi *bnapi = bp->bnapi[i];
3941 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3942 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
Michael Chanc0c050c2015-10-22 16:01:17 -04003943
Michael Chanedd0c2c2015-12-27 18:19:19 -05003944 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3945 hwrm_ring_free_send_msg(bp, ring,
3946 RING_FREE_REQ_RING_TYPE_CMPL,
3947 INVALID_HW_RING_ID);
3948 ring->fw_ring_id = INVALID_HW_RING_ID;
3949 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
Michael Chanc0c050c2015-10-22 16:01:17 -04003950 }
3951 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003952}
3953
Michael Chanbb053f52016-02-26 04:00:02 -05003954static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
3955 u32 buf_tmrs, u16 flags,
3956 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3957{
3958 req->flags = cpu_to_le16(flags);
3959 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
3960 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
3961 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
3962 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
3963 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
3964 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
3965 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
3966 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
3967}
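/* Note: the max_bufs and buf_tmrs arguments are packed u32 values - the low
 * 16 bits carry the normal value and the high 16 bits the "during interrupt"
 * value, which is why callers pass (max_buf_irq << 16 | max_buf) and
 * (buf_tmr_irq << 16 | buf_tmr).
 */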
3968
Michael Chanc0c050c2015-10-22 16:01:17 -04003969int bnxt_hwrm_set_coal(struct bnxt *bp)
3970{
3971 int i, rc = 0;
Michael Chandfc9c942016-02-26 04:00:03 -05003972 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3973 req_tx = {0}, *req;
Michael Chanc0c050c2015-10-22 16:01:17 -04003974 u16 max_buf, max_buf_irq;
3975 u16 buf_tmr, buf_tmr_irq;
3976 u32 flags;
3977
Michael Chandfc9c942016-02-26 04:00:03 -05003978 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
3979 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3980 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
3981 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003982
Michael Chandfb5b892016-02-26 04:00:01 -05003983 /* Each rx completion (2 records) should be DMAed immediately.
3984 * DMA 1/4 of the completion buffers at a time.
3985 */
3986 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
Michael Chanc0c050c2015-10-22 16:01:17 -04003987 /* max_buf must not be zero */
3988 max_buf = clamp_t(u16, max_buf, 1, 63);
Michael Chandfb5b892016-02-26 04:00:01 -05003989 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
3990 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
3991 /* buf timer set to 1/4 of interrupt timer */
3992 buf_tmr = max_t(u16, buf_tmr / 4, 1);
3993 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
3994 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003995
3996 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3997
3998 /* RING_IDLE generates more IRQs for lower latency. Enable it only
3999 * if coal_ticks is less than 25 us.
4000 */
Michael Chandfb5b892016-02-26 04:00:01 -05004001 if (bp->rx_coal_ticks < 25)
Michael Chanc0c050c2015-10-22 16:01:17 -04004002 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4003
Michael Chanbb053f52016-02-26 04:00:02 -05004004 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
Michael Chandfc9c942016-02-26 04:00:03 -05004005 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4006
4007 /* max_buf must not be zero */
4008 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4009 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4010 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4011 /* buf timer set to 1/4 of interrupt timer */
4012 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4013 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4014 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4015
4016 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4017 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4018 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
Michael Chanc0c050c2015-10-22 16:01:17 -04004019
4020 mutex_lock(&bp->hwrm_cmd_lock);
4021 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chandfc9c942016-02-26 04:00:03 -05004022 struct bnxt_napi *bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04004023
Michael Chandfc9c942016-02-26 04:00:03 -05004024 req = &req_rx;
4025 if (!bnapi->rx_ring)
4026 req = &req_tx;
4027 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4028
4029 rc = _hwrm_send_message(bp, req, sizeof(*req),
Michael Chanc0c050c2015-10-22 16:01:17 -04004030 HWRM_CMD_TIMEOUT);
4031 if (rc)
4032 break;
4033 }
4034 mutex_unlock(&bp->hwrm_cmd_lock);
4035 return rc;
4036}
4037
4038static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4039{
4040 int rc = 0, i;
4041 struct hwrm_stat_ctx_free_input req = {0};
4042
4043 if (!bp->bnapi)
4044 return 0;
4045
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04004046 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4047 return 0;
4048
Michael Chanc0c050c2015-10-22 16:01:17 -04004049 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4050
4051 mutex_lock(&bp->hwrm_cmd_lock);
4052 for (i = 0; i < bp->cp_nr_rings; i++) {
4053 struct bnxt_napi *bnapi = bp->bnapi[i];
4054 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4055
4056 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4057 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4058
4059 rc = _hwrm_send_message(bp, &req, sizeof(req),
4060 HWRM_CMD_TIMEOUT);
4061 if (rc)
4062 break;
4063
4064 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4065 }
4066 }
4067 mutex_unlock(&bp->hwrm_cmd_lock);
4068 return rc;
4069}
4070
4071static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4072{
4073 int rc = 0, i;
4074 struct hwrm_stat_ctx_alloc_input req = {0};
4075 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4076
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04004077 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4078 return 0;
4079
Michael Chanc0c050c2015-10-22 16:01:17 -04004080 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4081
Michael Chan51f30782016-07-01 18:46:29 -04004082 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
Michael Chanc0c050c2015-10-22 16:01:17 -04004083
4084 mutex_lock(&bp->hwrm_cmd_lock);
4085 for (i = 0; i < bp->cp_nr_rings; i++) {
4086 struct bnxt_napi *bnapi = bp->bnapi[i];
4087 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4088
4089 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4090
4091 rc = _hwrm_send_message(bp, &req, sizeof(req),
4092 HWRM_CMD_TIMEOUT);
4093 if (rc)
4094 break;
4095
4096 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4097
4098 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4099 }
4100 mutex_unlock(&bp->hwrm_cmd_lock);
Pan Bian89aa8442016-12-03 17:56:17 +08004101 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04004102}
4103
Michael Chancf6645f2016-06-13 02:25:28 -04004104static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4105{
4106 struct hwrm_func_qcfg_input req = {0};
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04004107 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chancf6645f2016-06-13 02:25:28 -04004108 int rc;
4109
4110 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4111 req.fid = cpu_to_le16(0xffff);
4112 mutex_lock(&bp->hwrm_cmd_lock);
4113 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4114 if (rc)
4115 goto func_qcfg_exit;
4116
4117#ifdef CONFIG_BNXT_SRIOV
4118 if (BNXT_VF(bp)) {
Michael Chancf6645f2016-06-13 02:25:28 -04004119 struct bnxt_vf_info *vf = &bp->vf;
4120
4121 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4122 }
4123#endif
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04004124 switch (resp->port_partition_type) {
4125 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4126 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4127 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4128 bp->port_partition_type = resp->port_partition_type;
4129 break;
4130 }
Michael Chancf6645f2016-06-13 02:25:28 -04004131
4132func_qcfg_exit:
4133 mutex_unlock(&bp->hwrm_cmd_lock);
4134 return rc;
4135}
4136
Michael Chan7b08f662016-12-07 00:26:18 -05004137static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004138{
4139 int rc = 0;
4140 struct hwrm_func_qcaps_input req = {0};
4141 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4142
4143 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4144 req.fid = cpu_to_le16(0xffff);
4145
4146 mutex_lock(&bp->hwrm_cmd_lock);
4147 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4148 if (rc)
4149 goto hwrm_func_qcaps_exit;
4150
Michael Chane4060d32016-12-07 00:26:19 -05004151 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4152 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4153 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4154 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4155
Michael Chan7cc5a202016-09-19 03:58:05 -04004156 bp->tx_push_thresh = 0;
4157 if (resp->flags &
4158 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4159 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4160
Michael Chanc0c050c2015-10-22 16:01:17 -04004161 if (BNXT_PF(bp)) {
4162 struct bnxt_pf_info *pf = &bp->pf;
4163
4164 pf->fw_fid = le16_to_cpu(resp->fid);
4165 pf->port_id = le16_to_cpu(resp->port_id);
Michael Chan87027db2016-07-01 18:46:28 -04004166 bp->dev->dev_port = pf->port_id;
Michael Chan11f15ed2016-04-05 14:08:55 -04004167 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
Jeffrey Huangbdd43472015-12-02 01:54:07 -05004168 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04004169 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4170 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4171 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004172 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
Michael Chanb72d4a62015-12-27 18:19:27 -05004173 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4174 if (!pf->max_hw_ring_grps)
4175 pf->max_hw_ring_grps = pf->max_tx_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004176 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4177 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4178 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4179 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4180 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4181 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4182 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4183 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4184 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4185 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4186 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4187 } else {
Michael Chan379a80a2015-10-23 15:06:19 -04004188#ifdef CONFIG_BNXT_SRIOV
Michael Chanc0c050c2015-10-22 16:01:17 -04004189 struct bnxt_vf_info *vf = &bp->vf;
4190
4191 vf->fw_fid = le16_to_cpu(resp->fid);
Michael Chanc0c050c2015-10-22 16:01:17 -04004192
4193 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4194 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4195 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4196 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
Michael Chanb72d4a62015-12-27 18:19:27 -05004197 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4198 if (!vf->max_hw_ring_grps)
4199 vf->max_hw_ring_grps = vf->max_tx_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004200 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4201 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4202 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
Michael Chan7cc5a202016-09-19 03:58:05 -04004203
4204 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
Michael Chan001154e2016-09-19 03:58:06 -04004205 mutex_unlock(&bp->hwrm_cmd_lock);
4206
4207 if (is_valid_ether_addr(vf->mac_addr)) {
Michael Chan7cc5a202016-09-19 03:58:05 -04004208 /* overwrite netdev dev_addr with the admin VF MAC */
4209 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
Michael Chan001154e2016-09-19 03:58:06 -04004210 } else {
Michael Chan7cc5a202016-09-19 03:58:05 -04004211 random_ether_addr(bp->dev->dev_addr);
Michael Chan001154e2016-09-19 03:58:06 -04004212 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4213 }
4214 return rc;
Michael Chan379a80a2015-10-23 15:06:19 -04004215#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04004216 }
4217
Michael Chanc0c050c2015-10-22 16:01:17 -04004218hwrm_func_qcaps_exit:
4219 mutex_unlock(&bp->hwrm_cmd_lock);
4220 return rc;
4221}
4222
4223static int bnxt_hwrm_func_reset(struct bnxt *bp)
4224{
4225 struct hwrm_func_reset_input req = {0};
4226
4227 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4228 req.enables = 0;
4229
4230 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4231}
4232
4233static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4234{
4235 int rc = 0;
4236 struct hwrm_queue_qportcfg_input req = {0};
4237 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4238 u8 i, *qptr;
4239
4240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4241
4242 mutex_lock(&bp->hwrm_cmd_lock);
4243 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4244 if (rc)
4245 goto qportcfg_exit;
4246
4247 if (!resp->max_configurable_queues) {
4248 rc = -EINVAL;
4249 goto qportcfg_exit;
4250 }
4251 bp->max_tc = resp->max_configurable_queues;
Michael Chan87c374d2016-12-02 21:17:16 -05004252 bp->max_lltc = resp->max_configurable_lossless_queues;
Michael Chanc0c050c2015-10-22 16:01:17 -04004253 if (bp->max_tc > BNXT_MAX_QUEUE)
4254 bp->max_tc = BNXT_MAX_QUEUE;
4255
Michael Chan441cabb2016-09-19 03:58:02 -04004256 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4257 bp->max_tc = 1;
4258
Michael Chan87c374d2016-12-02 21:17:16 -05004259 if (bp->max_lltc > bp->max_tc)
4260 bp->max_lltc = bp->max_tc;
4261
Michael Chanc0c050c2015-10-22 16:01:17 -04004262 qptr = &resp->queue_id0;
4263 for (i = 0; i < bp->max_tc; i++) {
4264 bp->q_info[i].queue_id = *qptr++;
4265 bp->q_info[i].queue_profile = *qptr++;
4266 }
4267
4268qportcfg_exit:
4269 mutex_unlock(&bp->hwrm_cmd_lock);
4270 return rc;
4271}
4272
4273static int bnxt_hwrm_ver_get(struct bnxt *bp)
4274{
4275 int rc;
4276 struct hwrm_ver_get_input req = {0};
4277 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4278
Michael Chane6ef2692016-03-28 19:46:05 -04004279 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
Michael Chanc0c050c2015-10-22 16:01:17 -04004280 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4281 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4282 req.hwrm_intf_min = HWRM_VERSION_MINOR;
4283 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4284 mutex_lock(&bp->hwrm_cmd_lock);
4285 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4286 if (rc)
4287 goto hwrm_ver_get_exit;
4288
4289 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4290
Michael Chan11f15ed2016-04-05 14:08:55 -04004291 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4292 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
Michael Chanc1935542015-12-27 18:19:28 -05004293 if (resp->hwrm_intf_maj < 1) {
4294 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04004295 resp->hwrm_intf_maj, resp->hwrm_intf_min,
Michael Chanc1935542015-12-27 18:19:28 -05004296 resp->hwrm_intf_upd);
4297 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04004298 }
Rob Swindell3ebf6f02016-02-26 04:00:06 -05004299 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
Michael Chanc0c050c2015-10-22 16:01:17 -04004300 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4301 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4302
Michael Chanff4fe812016-02-26 04:00:04 -05004303 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4304 if (!bp->hwrm_cmd_timeout)
4305 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4306
Michael Chane6ef2692016-03-28 19:46:05 -04004307 if (resp->hwrm_intf_maj >= 1)
4308 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4309
Michael Chan659c8052016-06-13 02:25:33 -04004310 bp->chip_num = le16_to_cpu(resp->chip_num);
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04004311 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4312 !resp->chip_metal)
4313 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
Michael Chan659c8052016-06-13 02:25:33 -04004314
Michael Chanc0c050c2015-10-22 16:01:17 -04004315hwrm_ver_get_exit:
4316 mutex_unlock(&bp->hwrm_cmd_lock);
4317 return rc;
4318}
4319
Rob Swindell5ac67d82016-09-19 03:58:03 -04004320int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4321{
Rob Swindell878786d2016-09-20 03:36:33 -04004322#if IS_ENABLED(CONFIG_RTC_LIB)
Rob Swindell5ac67d82016-09-19 03:58:03 -04004323 struct hwrm_fw_set_time_input req = {0};
4324 struct rtc_time tm;
4325 struct timeval tv;
4326
4327 if (bp->hwrm_spec_code < 0x10400)
4328 return -EOPNOTSUPP;
4329
4330 do_gettimeofday(&tv);
4331 rtc_time_to_tm(tv.tv_sec, &tm);
4332 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4333 req.year = cpu_to_le16(1900 + tm.tm_year);
4334 req.month = 1 + tm.tm_mon;
4335 req.day = tm.tm_mday;
4336 req.hour = tm.tm_hour;
4337 req.minute = tm.tm_min;
4338 req.second = tm.tm_sec;
4339 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
Rob Swindell878786d2016-09-20 03:36:33 -04004340#else
4341 return -EOPNOTSUPP;
4342#endif
Rob Swindell5ac67d82016-09-19 03:58:03 -04004343}
4344
Michael Chan3bdf56c2016-03-07 15:38:45 -05004345static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4346{
4347 int rc;
4348 struct bnxt_pf_info *pf = &bp->pf;
4349 struct hwrm_port_qstats_input req = {0};
4350
4351 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4352 return 0;
4353
4354 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4355 req.port_id = cpu_to_le16(pf->port_id);
4356 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4357 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4358 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4359 return rc;
4360}
4361
Michael Chanc0c050c2015-10-22 16:01:17 -04004362static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4363{
4364 if (bp->vxlan_port_cnt) {
4365 bnxt_hwrm_tunnel_dst_port_free(
4366 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4367 }
4368 bp->vxlan_port_cnt = 0;
4369 if (bp->nge_port_cnt) {
4370 bnxt_hwrm_tunnel_dst_port_free(
4371 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4372 }
4373 bp->nge_port_cnt = 0;
4374}
4375
4376static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4377{
4378 int rc, i;
4379 u32 tpa_flags = 0;
4380
4381 if (set_tpa)
4382 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4383 for (i = 0; i < bp->nr_vnics; i++) {
4384 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4385 if (rc) {
4386 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4387 rc, i);
4388 return rc;
4389 }
4390 }
4391 return 0;
4392}
4393
4394static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4395{
4396 int i;
4397
4398 for (i = 0; i < bp->nr_vnics; i++)
4399 bnxt_hwrm_vnic_set_rss(bp, i, false);
4400}
4401
4402static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4403 bool irq_re_init)
4404{
4405 if (bp->vnic_info) {
4406 bnxt_hwrm_clear_vnic_filter(bp);
4407 /* clear all RSS settings before freeing the vnic ctx */
4408 bnxt_hwrm_clear_vnic_rss(bp);
4409 bnxt_hwrm_vnic_ctx_free(bp);
4410 /* before freeing the vnic, undo the vnic TPA settings */
4411 if (bp->flags & BNXT_FLAG_TPA)
4412 bnxt_set_tpa(bp, false);
4413 bnxt_hwrm_vnic_free(bp);
4414 }
4415 bnxt_hwrm_ring_free(bp, close_path);
4416 bnxt_hwrm_ring_grp_free(bp);
4417 if (irq_re_init) {
4418 bnxt_hwrm_stat_ctx_free(bp);
4419 bnxt_hwrm_free_tunnel_ports(bp);
4420 }
4421}
4422
4423static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4424{
4425 int rc;
4426
4427 /* allocate context for vnic */
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004428 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
Michael Chanc0c050c2015-10-22 16:01:17 -04004429 if (rc) {
4430 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4431 vnic_id, rc);
4432 goto vnic_setup_err;
4433 }
4434 bp->rsscos_nr_ctxs++;
4435
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004436 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4437 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4438 if (rc) {
4439 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4440 vnic_id, rc);
4441 goto vnic_setup_err;
4442 }
4443 bp->rsscos_nr_ctxs++;
4444 }
4445
Michael Chanc0c050c2015-10-22 16:01:17 -04004446 /* configure default vnic, ring grp */
4447 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4448 if (rc) {
4449 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4450 vnic_id, rc);
4451 goto vnic_setup_err;
4452 }
4453
4454 /* Enable RSS hashing on vnic */
4455 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4456 if (rc) {
4457 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4458 vnic_id, rc);
4459 goto vnic_setup_err;
4460 }
4461
4462 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4463 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4464 if (rc) {
4465 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4466 vnic_id, rc);
4467 }
4468 }
4469
4470vnic_setup_err:
4471 return rc;
4472}
4473
4474static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4475{
4476#ifdef CONFIG_RFS_ACCEL
4477 int i, rc = 0;
4478
4479 for (i = 0; i < bp->rx_nr_rings; i++) {
4480 u16 vnic_id = i + 1;
4481 u16 ring_id = i;
4482
4483 if (vnic_id >= bp->nr_vnics)
4484 break;
4485
4486 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
Michael Chanb81a90d2016-01-02 23:45:01 -05004487 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
Michael Chanc0c050c2015-10-22 16:01:17 -04004488 if (rc) {
4489 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4490 vnic_id, rc);
4491 break;
4492 }
4493 rc = bnxt_setup_vnic(bp, vnic_id);
4494 if (rc)
4495 break;
4496 }
4497 return rc;
4498#else
4499 return 0;
4500#endif
4501}
4502
Michael Chan17c71ac2016-07-01 18:46:27 -04004503/* Allow PF and VF with default VLAN to be in promiscuous mode */
4504static bool bnxt_promisc_ok(struct bnxt *bp)
4505{
4506#ifdef CONFIG_BNXT_SRIOV
4507 if (BNXT_VF(bp) && !bp->vf.vlan)
4508 return false;
4509#endif
4510 return true;
4511}
4512
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004513static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
4514{
4515 unsigned int rc = 0;
4516
4517 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
4518 if (rc) {
4519 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4520 rc);
4521 return rc;
4522 }
4523
4524 rc = bnxt_hwrm_vnic_cfg(bp, 1);
4525 if (rc) {
4526 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4527 rc);
4528 return rc;
4529 }
4530 return rc;
4531}
4532
Michael Chanb664f002015-12-02 01:54:08 -05004533static int bnxt_cfg_rx_mode(struct bnxt *);
Michael Chan7d2837d2016-05-04 16:56:44 -04004534static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
Michael Chanb664f002015-12-02 01:54:08 -05004535
Michael Chanc0c050c2015-10-22 16:01:17 -04004536static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4537{
Michael Chan7d2837d2016-05-04 16:56:44 -04004538 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
Michael Chanc0c050c2015-10-22 16:01:17 -04004539 int rc = 0;
Prashant Sreedharan76595192016-07-18 07:15:22 -04004540 unsigned int rx_nr_rings = bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004541
4542 if (irq_re_init) {
4543 rc = bnxt_hwrm_stat_ctx_alloc(bp);
4544 if (rc) {
4545 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4546 rc);
4547 goto err_out;
4548 }
4549 }
4550
4551 rc = bnxt_hwrm_ring_alloc(bp);
4552 if (rc) {
4553 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4554 goto err_out;
4555 }
4556
4557 rc = bnxt_hwrm_ring_grp_alloc(bp);
4558 if (rc) {
4559 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4560 goto err_out;
4561 }
4562
Prashant Sreedharan76595192016-07-18 07:15:22 -04004563 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4564 rx_nr_rings--;
4565
Michael Chanc0c050c2015-10-22 16:01:17 -04004566 /* default vnic 0 */
Prashant Sreedharan76595192016-07-18 07:15:22 -04004567 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004568 if (rc) {
4569 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4570 goto err_out;
4571 }
4572
4573 rc = bnxt_setup_vnic(bp, 0);
4574 if (rc)
4575 goto err_out;
4576
4577 if (bp->flags & BNXT_FLAG_RFS) {
4578 rc = bnxt_alloc_rfs_vnics(bp);
4579 if (rc)
4580 goto err_out;
4581 }
4582
4583 if (bp->flags & BNXT_FLAG_TPA) {
4584 rc = bnxt_set_tpa(bp, true);
4585 if (rc)
4586 goto err_out;
4587 }
4588
4589 if (BNXT_VF(bp))
4590 bnxt_update_vf_mac(bp);
4591
4592 /* Filter for default vnic 0 */
4593 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4594 if (rc) {
4595 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4596 goto err_out;
4597 }
Michael Chan7d2837d2016-05-04 16:56:44 -04004598 vnic->uc_filter_count = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04004599
Michael Chan7d2837d2016-05-04 16:56:44 -04004600 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04004601
Michael Chan17c71ac2016-07-01 18:46:27 -04004602 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chan7d2837d2016-05-04 16:56:44 -04004603 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4604
4605 if (bp->dev->flags & IFF_ALLMULTI) {
4606 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4607 vnic->mc_list_count = 0;
4608 } else {
4609 u32 mask = 0;
4610
4611 bnxt_mc_list_updated(bp, &mask);
4612 vnic->rx_mask |= mask;
4613 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004614
Michael Chanb664f002015-12-02 01:54:08 -05004615 rc = bnxt_cfg_rx_mode(bp);
4616 if (rc)
Michael Chanc0c050c2015-10-22 16:01:17 -04004617 goto err_out;
Michael Chanc0c050c2015-10-22 16:01:17 -04004618
4619 rc = bnxt_hwrm_set_coal(bp);
4620 if (rc)
4621 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004622 rc);
4623
4624 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4625 rc = bnxt_setup_nitroa0_vnic(bp);
4626 if (rc)
4627 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
4628 rc);
4629 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004630
Michael Chancf6645f2016-06-13 02:25:28 -04004631 if (BNXT_VF(bp)) {
4632 bnxt_hwrm_func_qcfg(bp);
4633 netdev_update_features(bp->dev);
4634 }
4635
Michael Chanc0c050c2015-10-22 16:01:17 -04004636 return 0;
4637
4638err_out:
4639 bnxt_hwrm_resource_free(bp, 0, true);
4640
4641 return rc;
4642}
4643
4644static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4645{
4646 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4647 return 0;
4648}
4649
4650static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4651{
4652 bnxt_init_rx_rings(bp);
4653 bnxt_init_tx_rings(bp);
4654 bnxt_init_ring_grps(bp, irq_re_init);
4655 bnxt_init_vnics(bp);
4656
4657 return bnxt_init_chip(bp, irq_re_init);
4658}
4659
4660static void bnxt_disable_int(struct bnxt *bp)
4661{
4662 int i;
4663
4664 if (!bp->bnapi)
4665 return;
4666
4667 for (i = 0; i < bp->cp_nr_rings; i++) {
4668 struct bnxt_napi *bnapi = bp->bnapi[i];
4669 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4670
4671 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4672 }
4673}
4674
4675static void bnxt_enable_int(struct bnxt *bp)
4676{
4677 int i;
4678
4679 atomic_set(&bp->intr_sem, 0);
4680 for (i = 0; i < bp->cp_nr_rings; i++) {
4681 struct bnxt_napi *bnapi = bp->bnapi[i];
4682 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4683
4684 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4685 }
4686}
4687
4688static int bnxt_set_real_num_queues(struct bnxt *bp)
4689{
4690 int rc;
4691 struct net_device *dev = bp->dev;
4692
4693 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4694 if (rc)
4695 return rc;
4696
4697 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4698 if (rc)
4699 return rc;
4700
4701#ifdef CONFIG_RFS_ACCEL
Michael Chan45019a12015-12-27 18:19:22 -05004702 if (bp->flags & BNXT_FLAG_RFS)
Michael Chanc0c050c2015-10-22 16:01:17 -04004703 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04004704#endif
4705
4706 return rc;
4707}
4708
Michael Chan6e6c5a52016-01-02 23:45:02 -05004709static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4710 bool shared)
4711{
4712 int _rx = *rx, _tx = *tx;
4713
4714 if (shared) {
4715 *rx = min_t(int, _rx, max);
4716 *tx = min_t(int, _tx, max);
4717 } else {
4718 if (max < 2)
4719 return -ENOMEM;
4720
4721 while (_rx + _tx > max) {
4722 if (_rx > _tx && _rx > 1)
4723 _rx--;
4724 else if (_tx > 1)
4725 _tx--;
4726 }
4727 *rx = _rx;
4728 *tx = _tx;
4729 }
4730 return 0;
4731}
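/* Worked example (values assumed for illustration): in the non-shared case
 * with *rx = 8, *tx = 8 and max = 12, the loop alternately decrements tx and
 * rx until the sum fits, leaving 6 rx and 6 tx rings.  In the shared case
 * both counts are simply clamped to max.
 */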
4732
Michael Chan78095922016-12-07 00:26:16 -05004733static void bnxt_setup_msix(struct bnxt *bp)
4734{
4735 const int len = sizeof(bp->irq_tbl[0].name);
4736 struct net_device *dev = bp->dev;
4737 int tcs, i;
4738
4739 tcs = netdev_get_num_tc(dev);
4740 if (tcs > 1) {
4741 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4742 if (bp->tx_nr_rings_per_tc == 0) {
4743 netdev_reset_tc(dev);
4744 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4745 } else {
4746 int i, off, count;
4747
4748 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4749 for (i = 0; i < tcs; i++) {
4750 count = bp->tx_nr_rings_per_tc;
4751 off = i * count;
4752 netdev_set_tc_queue(dev, i, count, off);
4753 }
4754 }
4755 }
4756
4757 for (i = 0; i < bp->cp_nr_rings; i++) {
4758 char *attr;
4759
4760 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4761 attr = "TxRx";
4762 else if (i < bp->rx_nr_rings)
4763 attr = "rx";
4764 else
4765 attr = "tx";
4766
4767 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
4768 i);
4769 bp->irq_tbl[i].handler = bnxt_msix;
4770 }
4771}
4772
4773static void bnxt_setup_inta(struct bnxt *bp)
4774{
4775 const int len = sizeof(bp->irq_tbl[0].name);
4776
4777 if (netdev_get_num_tc(bp->dev))
4778 netdev_reset_tc(bp->dev);
4779
4780 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
4781 0);
4782 bp->irq_tbl[0].handler = bnxt_inta;
4783}
4784
4785static int bnxt_setup_int_mode(struct bnxt *bp)
4786{
4787 int rc;
4788
4789 if (bp->flags & BNXT_FLAG_USING_MSIX)
4790 bnxt_setup_msix(bp);
4791 else
4792 bnxt_setup_inta(bp);
4793
4794 rc = bnxt_set_real_num_queues(bp);
4795 return rc;
4796}
4797
Michael Chane4060d32016-12-07 00:26:19 -05004798unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
4799{
4800#if defined(CONFIG_BNXT_SRIOV)
4801 if (BNXT_VF(bp))
4802 return bp->vf.max_stat_ctxs;
4803#endif
4804 return bp->pf.max_stat_ctxs;
4805}
4806
Michael Chana588e452016-12-07 00:26:21 -05004807void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
4808{
4809#if defined(CONFIG_BNXT_SRIOV)
4810 if (BNXT_VF(bp))
4811 bp->vf.max_stat_ctxs = max;
4812 else
4813#endif
4814 bp->pf.max_stat_ctxs = max;
4815}
4816
Michael Chane4060d32016-12-07 00:26:19 -05004817unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
4818{
4819#if defined(CONFIG_BNXT_SRIOV)
4820 if (BNXT_VF(bp))
4821 return bp->vf.max_cp_rings;
4822#endif
4823 return bp->pf.max_cp_rings;
4824}
4825
Michael Chana588e452016-12-07 00:26:21 -05004826void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
4827{
4828#if defined(CONFIG_BNXT_SRIOV)
4829 if (BNXT_VF(bp))
4830 bp->vf.max_cp_rings = max;
4831 else
4832#endif
4833 bp->pf.max_cp_rings = max;
4834}
4835
Michael Chan78095922016-12-07 00:26:16 -05004836static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
4837{
4838#if defined(CONFIG_BNXT_SRIOV)
4839 if (BNXT_VF(bp))
4840 return bp->vf.max_irqs;
4841#endif
4842 return bp->pf.max_irqs;
4843}
4844
Michael Chan33c26572016-12-07 00:26:15 -05004845void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
4846{
4847#if defined(CONFIG_BNXT_SRIOV)
4848 if (BNXT_VF(bp))
4849 bp->vf.max_irqs = max_irqs;
4850 else
4851#endif
4852 bp->pf.max_irqs = max_irqs;
4853}
4854
Michael Chan78095922016-12-07 00:26:16 -05004855static int bnxt_init_msix(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004856{
Michael Chan01657bc2016-01-02 23:45:03 -05004857 int i, total_vecs, rc = 0, min = 1;
Michael Chan78095922016-12-07 00:26:16 -05004858 struct msix_entry *msix_ent;
Michael Chanc0c050c2015-10-22 16:01:17 -04004859
Michael Chan78095922016-12-07 00:26:16 -05004860 total_vecs = bnxt_get_max_func_irqs(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04004861 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4862 if (!msix_ent)
4863 return -ENOMEM;
4864
4865 for (i = 0; i < total_vecs; i++) {
4866 msix_ent[i].entry = i;
4867 msix_ent[i].vector = 0;
4868 }
4869
Michael Chan01657bc2016-01-02 23:45:03 -05004870 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4871 min = 2;
4872
4873 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
Michael Chanc0c050c2015-10-22 16:01:17 -04004874 if (total_vecs < 0) {
4875 rc = -ENODEV;
4876 goto msix_setup_exit;
4877 }
4878
4879 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4880 if (bp->irq_tbl) {
Michael Chan78095922016-12-07 00:26:16 -05004881 for (i = 0; i < total_vecs; i++)
4882 bp->irq_tbl[i].vector = msix_ent[i].vector;
Michael Chanc0c050c2015-10-22 16:01:17 -04004883
Michael Chan78095922016-12-07 00:26:16 -05004884 bp->total_irqs = total_vecs;
Michael Chanc0c050c2015-10-22 16:01:17 -04004885 /* Trim rings based upon num of vectors allocated */
Michael Chan6e6c5a52016-01-02 23:45:02 -05004886 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
Michael Chan01657bc2016-01-02 23:45:03 -05004887 total_vecs, min == 1);
Michael Chan6e6c5a52016-01-02 23:45:02 -05004888 if (rc)
4889 goto msix_setup_exit;
4890
Michael Chanc0c050c2015-10-22 16:01:17 -04004891 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
Michael Chan78095922016-12-07 00:26:16 -05004892 bp->cp_nr_rings = (min == 1) ?
4893 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
4894 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04004895
Michael Chanc0c050c2015-10-22 16:01:17 -04004896 } else {
4897 rc = -ENOMEM;
4898 goto msix_setup_exit;
4899 }
4900 bp->flags |= BNXT_FLAG_USING_MSIX;
4901 kfree(msix_ent);
4902 return 0;
4903
4904msix_setup_exit:
Michael Chan78095922016-12-07 00:26:16 -05004905 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
4906 kfree(bp->irq_tbl);
4907 bp->irq_tbl = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04004908 pci_disable_msix(bp->pdev);
4909 kfree(msix_ent);
4910 return rc;
4911}
4912
Michael Chan78095922016-12-07 00:26:16 -05004913static int bnxt_init_inta(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004914{
Michael Chanc0c050c2015-10-22 16:01:17 -04004915 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
Michael Chan78095922016-12-07 00:26:16 -05004916 if (!bp->irq_tbl)
4917 return -ENOMEM;
4918
4919 bp->total_irqs = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04004920 bp->rx_nr_rings = 1;
4921 bp->tx_nr_rings = 1;
4922 bp->cp_nr_rings = 1;
4923 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
Michael Chan01657bc2016-01-02 23:45:03 -05004924 bp->flags |= BNXT_FLAG_SHARED_RINGS;
Michael Chanc0c050c2015-10-22 16:01:17 -04004925 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan78095922016-12-07 00:26:16 -05004926 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004927}
4928
Michael Chan78095922016-12-07 00:26:16 -05004929static int bnxt_init_int_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04004930{
4931 int rc = 0;
4932
4933 if (bp->flags & BNXT_FLAG_MSIX_CAP)
Michael Chan78095922016-12-07 00:26:16 -05004934 rc = bnxt_init_msix(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04004935
Michael Chan1fa72e22016-04-25 02:30:49 -04004936 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004937 /* fallback to INTA */
Michael Chan78095922016-12-07 00:26:16 -05004938 rc = bnxt_init_inta(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04004939 }
4940 return rc;
4941}
4942
Michael Chan78095922016-12-07 00:26:16 -05004943static void bnxt_clear_int_mode(struct bnxt *bp)
4944{
4945 if (bp->flags & BNXT_FLAG_USING_MSIX)
4946 pci_disable_msix(bp->pdev);
4947
4948 kfree(bp->irq_tbl);
4949 bp->irq_tbl = NULL;
4950 bp->flags &= ~BNXT_FLAG_USING_MSIX;
4951}
4952
Michael Chanc0c050c2015-10-22 16:01:17 -04004953static void bnxt_free_irq(struct bnxt *bp)
4954{
4955 struct bnxt_irq *irq;
4956 int i;
4957
4958#ifdef CONFIG_RFS_ACCEL
4959 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4960 bp->dev->rx_cpu_rmap = NULL;
4961#endif
4962 if (!bp->irq_tbl)
4963 return;
4964
4965 for (i = 0; i < bp->cp_nr_rings; i++) {
4966 irq = &bp->irq_tbl[i];
4967 if (irq->requested)
4968 free_irq(irq->vector, bp->bnapi[i]);
4969 irq->requested = 0;
4970 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004971}
4972
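/* Request one IRQ per completion ring; when aRFS is compiled in, RX ring
 * vectors are also added to the CPU reverse map.
 */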
4973static int bnxt_request_irq(struct bnxt *bp)
4974{
Michael Chanb81a90d2016-01-02 23:45:01 -05004975 int i, j, rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04004976 unsigned long flags = 0;
4977#ifdef CONFIG_RFS_ACCEL
4978 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4979#endif
4980
4981 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4982 flags = IRQF_SHARED;
4983
Michael Chanb81a90d2016-01-02 23:45:01 -05004984 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004985 struct bnxt_irq *irq = &bp->irq_tbl[i];
4986#ifdef CONFIG_RFS_ACCEL
Michael Chanb81a90d2016-01-02 23:45:01 -05004987 if (rmap && bp->bnapi[i]->rx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04004988 rc = irq_cpu_rmap_add(rmap, irq->vector);
4989 if (rc)
4990 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05004991 j);
4992 j++;
Michael Chanc0c050c2015-10-22 16:01:17 -04004993 }
4994#endif
4995 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4996 bp->bnapi[i]);
4997 if (rc)
4998 break;
4999
5000 irq->requested = 1;
5001 }
5002 return rc;
5003}
5004
5005static void bnxt_del_napi(struct bnxt *bp)
5006{
5007 int i;
5008
5009 if (!bp->bnapi)
5010 return;
5011
5012 for (i = 0; i < bp->cp_nr_rings; i++) {
5013 struct bnxt_napi *bnapi = bp->bnapi[i];
5014
5015 napi_hash_del(&bnapi->napi);
5016 netif_napi_del(&bnapi->napi);
5017 }
Eric Dumazete5f6f562016-11-16 06:31:52 -08005018 /* We called napi_hash_del() before netif_napi_del(), so we need
5019 * to respect an RCU grace period before freeing the napi structures.
5020 */
5021 synchronize_net();
Michael Chanc0c050c2015-10-22 16:01:17 -04005022}
5023
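/* Register one NAPI context per completion ring. On Nitro A0 chips the last
 * ring is polled by bnxt_poll_nitroa0() instead of bnxt_poll().
 */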
5024static void bnxt_init_napi(struct bnxt *bp)
5025{
5026 int i;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005027 unsigned int cp_nr_rings = bp->cp_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04005028 struct bnxt_napi *bnapi;
5029
5030 if (bp->flags & BNXT_FLAG_USING_MSIX) {
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005031 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5032 cp_nr_rings--;
5033 for (i = 0; i < cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005034 bnapi = bp->bnapi[i];
5035 netif_napi_add(bp->dev, &bnapi->napi,
5036 bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04005037 }
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005038 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5039 bnapi = bp->bnapi[cp_nr_rings];
5040 netif_napi_add(bp->dev, &bnapi->napi,
5041 bnxt_poll_nitroa0, 64);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04005042 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005043 } else {
5044 bnapi = bp->bnapi[0];
5045 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04005046 }
5047}
5048
5049static void bnxt_disable_napi(struct bnxt *bp)
5050{
5051 int i;
5052
5053 if (!bp->bnapi)
5054 return;
5055
Michael Chanb356a2e2016-12-29 12:13:31 -05005056 for (i = 0; i < bp->cp_nr_rings; i++)
Michael Chanc0c050c2015-10-22 16:01:17 -04005057 napi_disable(&bp->bnapi[i]->napi);
Michael Chanc0c050c2015-10-22 16:01:17 -04005058}
5059
5060static void bnxt_enable_napi(struct bnxt *bp)
5061{
5062 int i;
5063
5064 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chanfa7e2812016-05-10 19:18:00 -04005065 bp->bnapi[i]->in_reset = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04005066 napi_enable(&bp->bnapi[i]->napi);
5067 }
5068}
5069
Michael Chan7df4ae92016-12-02 21:17:17 -05005070void bnxt_tx_disable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005071{
5072 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005073 struct bnxt_tx_ring_info *txr;
5074 struct netdev_queue *txq;
5075
Michael Chanb6ab4b02016-01-02 23:44:59 -05005076 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04005077 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005078 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005079 txq = netdev_get_tx_queue(bp->dev, i);
Michael Chanc0c050c2015-10-22 16:01:17 -04005080 txr->dev_state = BNXT_DEV_STATE_CLOSING;
Michael Chanc0c050c2015-10-22 16:01:17 -04005081 }
5082 }
5083 /* Stop all TX queues */
5084 netif_tx_disable(bp->dev);
5085 netif_carrier_off(bp->dev);
5086}
5087
Michael Chan7df4ae92016-12-02 21:17:17 -05005088void bnxt_tx_enable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005089{
5090 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04005091 struct bnxt_tx_ring_info *txr;
5092 struct netdev_queue *txq;
5093
5094 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05005095 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04005096 txq = netdev_get_tx_queue(bp->dev, i);
5097 txr->dev_state = 0;
5098 }
5099 netif_tx_wake_all_queues(bp->dev);
5100 if (bp->link_info.link_up)
5101 netif_carrier_on(bp->dev);
5102}
5103
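/* Log the current link state: speed, duplex, flow control and, if supported,
 * whether EEE is active.
 */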
5104static void bnxt_report_link(struct bnxt *bp)
5105{
5106 if (bp->link_info.link_up) {
5107 const char *duplex;
5108 const char *flow_ctrl;
5109 u16 speed;
5110
5111 netif_carrier_on(bp->dev);
5112 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5113 duplex = "full";
5114 else
5115 duplex = "half";
5116 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5117 flow_ctrl = "ON - receive & transmit";
5118 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5119 flow_ctrl = "ON - transmit";
5120 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5121 flow_ctrl = "ON - receive";
5122 else
5123 flow_ctrl = "none";
5124 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5125 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
5126 speed, duplex, flow_ctrl);
Michael Chan170ce012016-04-05 14:08:57 -04005127 if (bp->flags & BNXT_FLAG_EEE_CAP)
5128 netdev_info(bp->dev, "EEE is %s\n",
5129 bp->eee.eee_active ? "active" :
5130 "not active");
Michael Chanc0c050c2015-10-22 16:01:17 -04005131 } else {
5132 netif_carrier_off(bp->dev);
5133 netdev_err(bp->dev, "NIC Link is Down\n");
5134 }
5135}
5136
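/* Query PHY capabilities from firmware: EEE support, LPI timer bounds and the
 * speeds supported for autonegotiation.
 */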
Michael Chan170ce012016-04-05 14:08:57 -04005137static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5138{
5139 int rc = 0;
5140 struct hwrm_port_phy_qcaps_input req = {0};
5141 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan93ed8112016-06-13 02:25:37 -04005142 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chan170ce012016-04-05 14:08:57 -04005143
5144 if (bp->hwrm_spec_code < 0x10201)
5145 return 0;
5146
5147 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5148
5149 mutex_lock(&bp->hwrm_cmd_lock);
5150 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5151 if (rc)
5152 goto hwrm_phy_qcaps_exit;
5153
5154 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5155 struct ethtool_eee *eee = &bp->eee;
5156 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5157
5158 bp->flags |= BNXT_FLAG_EEE_CAP;
5159 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5160 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5161 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5162 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5163 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5164 }
Michael Chan93ed8112016-06-13 02:25:37 -04005165 link_info->support_auto_speeds =
5166 le16_to_cpu(resp->supported_speeds_auto_mode);
Michael Chan170ce012016-04-05 14:08:57 -04005167
5168hwrm_phy_qcaps_exit:
5169 mutex_unlock(&bp->hwrm_cmd_lock);
5170 return rc;
5171}
5172
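/* Refresh the cached link state from firmware via PORT_PHY_QCFG and, when
 * chng_link_state is set, report any link up/down transition.
 */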
Michael Chanc0c050c2015-10-22 16:01:17 -04005173static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5174{
5175 int rc = 0;
5176 struct bnxt_link_info *link_info = &bp->link_info;
5177 struct hwrm_port_phy_qcfg_input req = {0};
5178 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5179 u8 link_up = link_info->link_up;
Michael Chan286ef9d2016-11-16 21:13:08 -05005180 u16 diff;
Michael Chanc0c050c2015-10-22 16:01:17 -04005181
5182 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5183
5184 mutex_lock(&bp->hwrm_cmd_lock);
5185 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5186 if (rc) {
5187 mutex_unlock(&bp->hwrm_cmd_lock);
5188 return rc;
5189 }
5190
5191 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5192 link_info->phy_link_status = resp->link;
5193 link_info->duplex = resp->duplex;
5194 link_info->pause = resp->pause;
5195 link_info->auto_mode = resp->auto_mode;
5196 link_info->auto_pause_setting = resp->auto_pause;
Michael Chan32773602016-03-07 15:38:42 -05005197 link_info->lp_pause = resp->link_partner_adv_pause;
Michael Chanc0c050c2015-10-22 16:01:17 -04005198 link_info->force_pause_setting = resp->force_pause;
Michael Chanc1935542015-12-27 18:19:28 -05005199 link_info->duplex_setting = resp->duplex;
Michael Chanc0c050c2015-10-22 16:01:17 -04005200 if (link_info->phy_link_status == BNXT_LINK_LINK)
5201 link_info->link_speed = le16_to_cpu(resp->link_speed);
5202 else
5203 link_info->link_speed = 0;
5204 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
Michael Chanc0c050c2015-10-22 16:01:17 -04005205 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5206 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
Michael Chan32773602016-03-07 15:38:42 -05005207 link_info->lp_auto_link_speeds =
5208 le16_to_cpu(resp->link_partner_adv_speeds);
Michael Chanc0c050c2015-10-22 16:01:17 -04005209 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5210 link_info->phy_ver[0] = resp->phy_maj;
5211 link_info->phy_ver[1] = resp->phy_min;
5212 link_info->phy_ver[2] = resp->phy_bld;
5213 link_info->media_type = resp->media_type;
Michael Chan03efbec2016-04-11 04:11:11 -04005214 link_info->phy_type = resp->phy_type;
Michael Chan11f15ed2016-04-05 14:08:55 -04005215 link_info->transceiver = resp->xcvr_pkg_type;
Michael Chan170ce012016-04-05 14:08:57 -04005216 link_info->phy_addr = resp->eee_config_phy_addr &
5217 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04005218 link_info->module_status = resp->module_status;
Michael Chanc0c050c2015-10-22 16:01:17 -04005219
Michael Chan170ce012016-04-05 14:08:57 -04005220 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5221 struct ethtool_eee *eee = &bp->eee;
5222 u16 fw_speeds;
5223
5224 eee->eee_active = 0;
5225 if (resp->eee_config_phy_addr &
5226 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5227 eee->eee_active = 1;
5228 fw_speeds = le16_to_cpu(
5229 resp->link_partner_adv_eee_link_speed_mask);
5230 eee->lp_advertised =
5231 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5232 }
5233
5234 /* Pull initial EEE config */
5235 if (!chng_link_state) {
5236 if (resp->eee_config_phy_addr &
5237 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5238 eee->eee_enabled = 1;
5239
5240 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5241 eee->advertised =
5242 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5243
5244 if (resp->eee_config_phy_addr &
5245 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5246 __le32 tmr;
5247
5248 eee->tx_lpi_enabled = 1;
5249 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5250 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5251 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5252 }
5253 }
5254 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005255 /* TODO: need to add more logic to report VF link */
5256 if (chng_link_state) {
5257 if (link_info->phy_link_status == BNXT_LINK_LINK)
5258 link_info->link_up = 1;
5259 else
5260 link_info->link_up = 0;
5261 if (link_up != link_info->link_up)
5262 bnxt_report_link(bp);
5263 } else {
5264 /* always link down if not required to update link state */
5265 link_info->link_up = 0;
5266 }
5267 mutex_unlock(&bp->hwrm_cmd_lock);
Michael Chan286ef9d2016-11-16 21:13:08 -05005268
5269 diff = link_info->support_auto_speeds ^ link_info->advertising;
5270 if ((link_info->support_auto_speeds | diff) !=
5271 link_info->support_auto_speeds) {
5272 /* An advertised speed is no longer supported, so we need to
5273 * update the advertisement settings. See bnxt_reset() for
5274 * comments about the rtnl_lock() sequence below.
5275 */
5276 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5277 rtnl_lock();
5278 link_info->advertising = link_info->support_auto_speeds;
5279 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
5280 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5281 bnxt_hwrm_set_link_setting(bp, true, false);
5282 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5283 rtnl_unlock();
5284 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005285 return 0;
5286}
5287
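/* Check the SFP+ module status reported by firmware and warn if the module is
 * unqualified, powered down or has TX disabled.
 */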
Michael Chan10289be2016-05-15 03:04:49 -04005288static void bnxt_get_port_module_status(struct bnxt *bp)
5289{
5290 struct bnxt_link_info *link_info = &bp->link_info;
5291 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5292 u8 module_status;
5293
5294 if (bnxt_update_link(bp, true))
5295 return;
5296
5297 module_status = link_info->module_status;
5298 switch (module_status) {
5299 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5300 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5301 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5302 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5303 bp->pf.port_id);
5304 if (bp->hwrm_spec_code >= 0x10201) {
5305 netdev_warn(bp->dev, "Module part number %s\n",
5306 resp->phy_vendor_partnumber);
5307 }
5308 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5309 netdev_warn(bp->dev, "TX is disabled\n");
5310 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5311 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5312 }
5313}
5314
Michael Chanc0c050c2015-10-22 16:01:17 -04005315static void
5316bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5317{
5318 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04005319 if (bp->hwrm_spec_code >= 0x10201)
5320 req->auto_pause =
5321 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04005322 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5323 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5324 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04005325 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04005326 req->enables |=
5327 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5328 } else {
5329 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5330 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5331 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5332 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5333 req->enables |=
5334 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04005335 if (bp->hwrm_spec_code >= 0x10201) {
5336 req->auto_pause = req->force_pause;
5337 req->enables |= cpu_to_le32(
5338 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5339 }
Michael Chanc0c050c2015-10-22 16:01:17 -04005340 }
5341}
5342
5343static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5344 struct hwrm_port_phy_cfg_input *req)
5345{
5346 u8 autoneg = bp->link_info.autoneg;
5347 u16 fw_link_speed = bp->link_info.req_link_speed;
5348 u32 advertising = bp->link_info.advertising;
5349
5350 if (autoneg & BNXT_AUTONEG_SPEED) {
5351 req->auto_mode |=
Michael Chan11f15ed2016-04-05 14:08:55 -04005352 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04005353
5354 req->enables |= cpu_to_le32(
5355 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5356 req->auto_link_speed_mask = cpu_to_le16(advertising);
5357
5358 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5359 req->flags |=
5360 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5361 } else {
5362 req->force_link_speed = cpu_to_le16(fw_link_speed);
5363 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5364 }
5365
Michael Chanc0c050c2015-10-22 16:01:17 -04005366 /* tell chimp that the setting takes effect immediately */
5367 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5368}
5369
5370int bnxt_hwrm_set_pause(struct bnxt *bp)
5371{
5372 struct hwrm_port_phy_cfg_input req = {0};
5373 int rc;
5374
5375 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5376 bnxt_hwrm_set_pause_common(bp, &req);
5377
5378 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5379 bp->link_info.force_link_chng)
5380 bnxt_hwrm_set_link_common(bp, &req);
5381
5382 mutex_lock(&bp->hwrm_cmd_lock);
5383 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5384 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5385 /* since changing the pause setting doesn't trigger any link
5386 * change event, the driver needs to update the current pause
5387 * result upon successful return of the phy_cfg command
5388 */
5389 bp->link_info.pause =
5390 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5391 bp->link_info.auto_pause_setting = 0;
5392 if (!bp->link_info.force_link_chng)
5393 bnxt_report_link(bp);
5394 }
5395 bp->link_info.force_link_chng = false;
5396 mutex_unlock(&bp->hwrm_cmd_lock);
5397 return rc;
5398}
5399
Michael Chan939f7f02016-04-05 14:08:58 -04005400static void bnxt_hwrm_set_eee(struct bnxt *bp,
5401 struct hwrm_port_phy_cfg_input *req)
5402{
5403 struct ethtool_eee *eee = &bp->eee;
5404
5405 if (eee->eee_enabled) {
5406 u16 eee_speeds;
5407 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5408
5409 if (eee->tx_lpi_enabled)
5410 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5411 else
5412 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5413
5414 req->flags |= cpu_to_le32(flags);
5415 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5416 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5417 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5418 } else {
5419 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5420 }
5421}
5422
5423int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04005424{
5425 struct hwrm_port_phy_cfg_input req = {0};
5426
5427 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5428 if (set_pause)
5429 bnxt_hwrm_set_pause_common(bp, &req);
5430
5431 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04005432
5433 if (set_eee)
5434 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04005435 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5436}
5437
Michael Chan33f7d552016-04-11 04:11:12 -04005438static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5439{
5440 struct hwrm_port_phy_cfg_input req = {0};
5441
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005442 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04005443 return 0;
5444
5445 if (pci_num_vf(bp->pdev))
5446 return 0;
5447
5448 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05005449 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04005450 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5451}
5452
Michael Chan939f7f02016-04-05 14:08:58 -04005453static bool bnxt_eee_config_ok(struct bnxt *bp)
5454{
5455 struct ethtool_eee *eee = &bp->eee;
5456 struct bnxt_link_info *link_info = &bp->link_info;
5457
5458 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
5459 return true;
5460
5461 if (eee->eee_enabled) {
5462 u32 advertising =
5463 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
5464
5465 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5466 eee->eee_enabled = 0;
5467 return false;
5468 }
5469 if (eee->advertised & ~advertising) {
5470 eee->advertised = advertising & eee->supported;
5471 return false;
5472 }
5473 }
5474 return true;
5475}
5476
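/* Compare the requested speed, pause and EEE settings against the state
 * reported by firmware and push any differences with PORT_PHY_CFG.
 */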
Michael Chanc0c050c2015-10-22 16:01:17 -04005477static int bnxt_update_phy_setting(struct bnxt *bp)
5478{
5479 int rc;
5480 bool update_link = false;
5481 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04005482 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04005483 struct bnxt_link_info *link_info = &bp->link_info;
5484
5485 rc = bnxt_update_link(bp, true);
5486 if (rc) {
5487 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
5488 rc);
5489 return rc;
5490 }
5491 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04005492 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
5493 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04005494 update_pause = true;
5495 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
5496 link_info->force_pause_setting != link_info->req_flow_ctrl)
5497 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005498 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5499 if (BNXT_AUTO_MODE(link_info->auto_mode))
5500 update_link = true;
5501 if (link_info->req_link_speed != link_info->force_link_speed)
5502 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05005503 if (link_info->req_duplex != link_info->duplex_setting)
5504 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005505 } else {
5506 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
5507 update_link = true;
5508 if (link_info->advertising != link_info->auto_link_speeds)
5509 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04005510 }
5511
Michael Chan16d663a2016-11-16 21:13:07 -05005512 /* The last close may have shut down the link, so we need to call
5513 * PHY_CFG to bring it back up.
5514 */
5515 if (!netif_carrier_ok(bp->dev))
5516 update_link = true;
5517
Michael Chan939f7f02016-04-05 14:08:58 -04005518 if (!bnxt_eee_config_ok(bp))
5519 update_eee = true;
5520
Michael Chanc0c050c2015-10-22 16:01:17 -04005521 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04005522 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04005523 else if (update_pause)
5524 rc = bnxt_hwrm_set_pause(bp);
5525 if (rc) {
5526 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
5527 rc);
5528 return rc;
5529 }
5530
5531 return rc;
5532}
5533
Jeffrey Huang11809492015-11-05 16:25:49 -05005534/* Common routine to pre-map certain register blocks to different GRC windows.
5535 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
5536 * in a PF and 3 windows in a VF can be customized to map in different
5537 * register blocks.
5538 */
5539static void bnxt_preset_reg_win(struct bnxt *bp)
5540{
5541 if (BNXT_PF(bp)) {
5542 /* CAG registers map to GRC window #4 */
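/* The +12 below appears to select window #4: window N seems to be
 * programmed at BNXT_GRCPF_REG_WINDOW_BASE_OUT + (N - 1) * 4.
 */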
5543 writel(BNXT_CAG_REG_BASE,
5544 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
5545 }
5546}
5547
Michael Chanc0c050c2015-10-22 16:01:17 -04005548static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5549{
5550 int rc = 0;
5551
Jeffrey Huang11809492015-11-05 16:25:49 -05005552 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005553 netif_carrier_off(bp->dev);
5554 if (irq_re_init) {
5555 rc = bnxt_setup_int_mode(bp);
5556 if (rc) {
5557 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
5558 rc);
5559 return rc;
5560 }
5561 }
5562 if ((bp->flags & BNXT_FLAG_RFS) &&
5563 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
5564 /* disable RFS if falling back to INTA */
5565 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
5566 bp->flags &= ~BNXT_FLAG_RFS;
5567 }
5568
5569 rc = bnxt_alloc_mem(bp, irq_re_init);
5570 if (rc) {
5571 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
5572 goto open_err_free_mem;
5573 }
5574
5575 if (irq_re_init) {
5576 bnxt_init_napi(bp);
5577 rc = bnxt_request_irq(bp);
5578 if (rc) {
5579 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
5580 goto open_err;
5581 }
5582 }
5583
5584 bnxt_enable_napi(bp);
5585
5586 rc = bnxt_init_nic(bp, irq_re_init);
5587 if (rc) {
5588 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
5589 goto open_err;
5590 }
5591
5592 if (link_re_init) {
5593 rc = bnxt_update_phy_setting(bp);
5594 if (rc)
Michael Chanba41d462016-02-19 19:43:21 -05005595 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04005596 }
5597
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07005598 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07005599 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04005600
Michael Chancaefe522015-12-09 19:35:42 -05005601 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04005602 bnxt_enable_int(bp);
5603 /* Enable TX queues */
5604 bnxt_tx_enable(bp);
5605 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04005606 /* Poll link status and check for SFP+ module status */
5607 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005608
5609 return 0;
5610
5611open_err:
5612 bnxt_disable_napi(bp);
5613 bnxt_del_napi(bp);
5614
5615open_err_free_mem:
5616 bnxt_free_skbs(bp);
5617 bnxt_free_irq(bp);
5618 bnxt_free_mem(bp, true);
5619 return rc;
5620}
5621
5622/* rtnl_lock held */
5623int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5624{
5625 int rc = 0;
5626
5627 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
5628 if (rc) {
5629 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
5630 dev_close(bp->dev);
5631 }
5632 return rc;
5633}
5634
5635static int bnxt_open(struct net_device *dev)
5636{
5637 struct bnxt *bp = netdev_priv(dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04005638
Michael Chanc0c050c2015-10-22 16:01:17 -04005639 return __bnxt_open_nic(bp, true, true);
5640}
5641
5642static void bnxt_disable_int_sync(struct bnxt *bp)
5643{
5644 int i;
5645
5646 atomic_inc(&bp->intr_sem);
5647 if (!netif_running(bp->dev))
5648 return;
5649
5650 bnxt_disable_int(bp);
5651 for (i = 0; i < bp->cp_nr_rings; i++)
5652 synchronize_irq(bp->irq_tbl[i].vector);
5653}
5654
5655int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5656{
5657 int rc = 0;
5658
5659#ifdef CONFIG_BNXT_SRIOV
5660 if (bp->sriov_cfg) {
5661 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
5662 !bp->sriov_cfg,
5663 BNXT_SRIOV_CFG_WAIT_TMO);
5664 if (rc)
5665 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
5666 }
5667#endif
5668 /* Change device state to avoid TX queue wake-ups */
5669 bnxt_tx_disable(bp);
5670
Michael Chancaefe522015-12-09 19:35:42 -05005671 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05005672 smp_mb__after_atomic();
5673 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
5674 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04005675
5676 /* Flush rings before disabling interrupts */
5677 bnxt_shutdown_nic(bp, irq_re_init);
5678
5679 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
5680
5681 bnxt_disable_napi(bp);
5682 bnxt_disable_int_sync(bp);
5683 del_timer_sync(&bp->timer);
5684 bnxt_free_skbs(bp);
5685
5686 if (irq_re_init) {
5687 bnxt_free_irq(bp);
5688 bnxt_del_napi(bp);
5689 }
5690 bnxt_free_mem(bp, irq_re_init);
5691 return rc;
5692}
5693
5694static int bnxt_close(struct net_device *dev)
5695{
5696 struct bnxt *bp = netdev_priv(dev);
5697
5698 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04005699 bnxt_hwrm_shutdown_link(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04005700 return 0;
5701}
5702
5703/* rtnl_lock held */
5704static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5705{
5706 switch (cmd) {
5707 case SIOCGMIIPHY:
5708 /* fallthru */
5709 case SIOCGMIIREG: {
5710 if (!netif_running(dev))
5711 return -EAGAIN;
5712
5713 return 0;
5714 }
5715
5716 case SIOCSMIIREG:
5717 if (!netif_running(dev))
5718 return -EAGAIN;
5719
5720 return 0;
5721
5722 default:
5723 /* do nothing */
5724 break;
5725 }
5726 return -EOPNOTSUPP;
5727}
5728
5729static struct rtnl_link_stats64 *
5730bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5731{
5732 u32 i;
5733 struct bnxt *bp = netdev_priv(dev);
5734
5735 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5736
5737 if (!bp->bnapi)
5738 return stats;
5739
5740 /* TODO check if we need to synchronize with bnxt_close path */
5741 for (i = 0; i < bp->cp_nr_rings; i++) {
5742 struct bnxt_napi *bnapi = bp->bnapi[i];
5743 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5744 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5745
5746 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5747 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5748 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5749
5750 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5751 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5752 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5753
5754 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5755 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5756 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5757
5758 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5759 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5760 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5761
5762 stats->rx_missed_errors +=
5763 le64_to_cpu(hw_stats->rx_discard_pkts);
5764
5765 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5766
Michael Chanc0c050c2015-10-22 16:01:17 -04005767 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5768 }
5769
Michael Chan9947f832016-03-07 15:38:46 -05005770 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5771 struct rx_port_stats *rx = bp->hw_rx_port_stats;
5772 struct tx_port_stats *tx = bp->hw_tx_port_stats;
5773
5774 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5775 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5776 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5777 le64_to_cpu(rx->rx_ovrsz_frames) +
5778 le64_to_cpu(rx->rx_runt_frames);
5779 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5780 le64_to_cpu(rx->rx_jbr_frames);
5781 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5782 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5783 stats->tx_errors = le64_to_cpu(tx->tx_err);
5784 }
5785
Michael Chanc0c050c2015-10-22 16:01:17 -04005786 return stats;
5787}
5788
5789static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5790{
5791 struct net_device *dev = bp->dev;
5792 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5793 struct netdev_hw_addr *ha;
5794 u8 *haddr;
5795 int mc_count = 0;
5796 bool update = false;
5797 int off = 0;
5798
5799 netdev_for_each_mc_addr(ha, dev) {
5800 if (mc_count >= BNXT_MAX_MC_ADDRS) {
5801 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5802 vnic->mc_list_count = 0;
5803 return false;
5804 }
5805 haddr = ha->addr;
5806 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5807 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5808 update = true;
5809 }
5810 off += ETH_ALEN;
5811 mc_count++;
5812 }
5813 if (mc_count)
5814 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5815
5816 if (mc_count != vnic->mc_list_count) {
5817 vnic->mc_list_count = mc_count;
5818 update = true;
5819 }
5820 return update;
5821}
5822
5823static bool bnxt_uc_list_updated(struct bnxt *bp)
5824{
5825 struct net_device *dev = bp->dev;
5826 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5827 struct netdev_hw_addr *ha;
5828 int off = 0;
5829
5830 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5831 return true;
5832
5833 netdev_for_each_uc_addr(ha, dev) {
5834 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5835 return true;
5836
5837 off += ETH_ALEN;
5838 }
5839 return false;
5840}
5841
5842static void bnxt_set_rx_mode(struct net_device *dev)
5843{
5844 struct bnxt *bp = netdev_priv(dev);
5845 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5846 u32 mask = vnic->rx_mask;
5847 bool mc_update = false;
5848 bool uc_update;
5849
5850 if (!netif_running(dev))
5851 return;
5852
5853 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5854 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5855 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5856
Michael Chan17c71ac2016-07-01 18:46:27 -04005857 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04005858 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5859
5860 uc_update = bnxt_uc_list_updated(bp);
5861
5862 if (dev->flags & IFF_ALLMULTI) {
5863 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5864 vnic->mc_list_count = 0;
5865 } else {
5866 mc_update = bnxt_mc_list_updated(bp, &mask);
5867 }
5868
5869 if (mask != vnic->rx_mask || uc_update || mc_update) {
5870 vnic->rx_mask = mask;
5871
5872 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5873 schedule_work(&bp->sp_task);
5874 }
5875}
5876
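/* Sync the unicast filter list and RX mask to firmware. Runs from the slow
 * path task after bnxt_set_rx_mode() schedules BNXT_RX_MASK_SP_EVENT.
 */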
Michael Chanb664f002015-12-02 01:54:08 -05005877static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04005878{
5879 struct net_device *dev = bp->dev;
5880 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5881 struct netdev_hw_addr *ha;
5882 int i, off = 0, rc;
5883 bool uc_update;
5884
5885 netif_addr_lock_bh(dev);
5886 uc_update = bnxt_uc_list_updated(bp);
5887 netif_addr_unlock_bh(dev);
5888
5889 if (!uc_update)
5890 goto skip_uc;
5891
5892 mutex_lock(&bp->hwrm_cmd_lock);
5893 for (i = 1; i < vnic->uc_filter_count; i++) {
5894 struct hwrm_cfa_l2_filter_free_input req = {0};
5895
5896 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5897 -1);
5898
5899 req.l2_filter_id = vnic->fw_l2_filter_id[i];
5900
5901 rc = _hwrm_send_message(bp, &req, sizeof(req),
5902 HWRM_CMD_TIMEOUT);
5903 }
5904 mutex_unlock(&bp->hwrm_cmd_lock);
5905
5906 vnic->uc_filter_count = 1;
5907
5908 netif_addr_lock_bh(dev);
5909 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5910 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5911 } else {
5912 netdev_for_each_uc_addr(ha, dev) {
5913 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5914 off += ETH_ALEN;
5915 vnic->uc_filter_count++;
5916 }
5917 }
5918 netif_addr_unlock_bh(dev);
5919
5920 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5921 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5922 if (rc) {
5923 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5924 rc);
5925 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05005926 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005927 }
5928 }
5929
5930skip_uc:
5931 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5932 if (rc)
5933 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
5934 rc);
Michael Chanb664f002015-12-02 01:54:08 -05005935
5936 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005937}
5938
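/* aRFS needs one VNIC (and RSS context) per RX ring in addition to the
 * default VNIC; report whether the PF has enough of both.
 */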
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005939static bool bnxt_rfs_capable(struct bnxt *bp)
5940{
5941#ifdef CONFIG_RFS_ACCEL
5942 struct bnxt_pf_info *pf = &bp->pf;
5943 int vnics;
5944
5945 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5946 return false;
5947
5948 vnics = 1 + bp->rx_nr_rings;
Vasundhara Volama2304902016-07-25 12:33:36 -04005949 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
5950 netdev_warn(bp->dev,
5951 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
5952 min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005953 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04005954 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005955
5956 return true;
5957#else
5958 return false;
5959#endif
5960}
5961
Michael Chanc0c050c2015-10-22 16:01:17 -04005962static netdev_features_t bnxt_fix_features(struct net_device *dev,
5963 netdev_features_t features)
5964{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005965 struct bnxt *bp = netdev_priv(dev);
5966
Vasundhara Volama2304902016-07-25 12:33:36 -04005967 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05005968 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04005969
5970 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
5971 * turned on or off together.
5972 */
5973 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
5974 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
5975 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
5976 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
5977 NETIF_F_HW_VLAN_STAG_RX);
5978 else
5979 features |= NETIF_F_HW_VLAN_CTAG_RX |
5980 NETIF_F_HW_VLAN_STAG_RX;
5981 }
Michael Chancf6645f2016-06-13 02:25:28 -04005982#ifdef CONFIG_BNXT_SRIOV
5983 if (BNXT_VF(bp)) {
5984 if (bp->vf.vlan) {
5985 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
5986 NETIF_F_HW_VLAN_STAG_RX);
5987 }
5988 }
5989#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04005990 return features;
5991}
5992
5993static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
5994{
5995 struct bnxt *bp = netdev_priv(dev);
5996 u32 flags = bp->flags;
5997 u32 changes;
5998 int rc = 0;
5999 bool re_init = false;
6000 bool update_tpa = false;
6001
6002 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006003 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04006004 flags |= BNXT_FLAG_GRO;
6005 if (features & NETIF_F_LRO)
6006 flags |= BNXT_FLAG_LRO;
6007
6008 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6009 flags |= BNXT_FLAG_STRIP_VLAN;
6010
6011 if (features & NETIF_F_NTUPLE)
6012 flags |= BNXT_FLAG_RFS;
6013
6014 changes = flags ^ bp->flags;
6015 if (changes & BNXT_FLAG_TPA) {
6016 update_tpa = true;
6017 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6018 (flags & BNXT_FLAG_TPA) == 0)
6019 re_init = true;
6020 }
6021
6022 if (changes & ~BNXT_FLAG_TPA)
6023 re_init = true;
6024
6025 if (flags != bp->flags) {
6026 u32 old_flags = bp->flags;
6027
6028 bp->flags = flags;
6029
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006030 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006031 if (update_tpa)
6032 bnxt_set_ring_params(bp);
6033 return rc;
6034 }
6035
6036 if (re_init) {
6037 bnxt_close_nic(bp, false, false);
6038 if (update_tpa)
6039 bnxt_set_ring_params(bp);
6040
6041 return bnxt_open_nic(bp, false, false);
6042 }
6043 if (update_tpa) {
6044 rc = bnxt_set_tpa(bp,
6045 (flags & BNXT_FLAG_TPA) ?
6046 true : false);
6047 if (rc)
6048 bp->flags = old_flags;
6049 }
6050 }
6051 return rc;
6052}
6053
Michael Chan9f554592016-01-02 23:44:58 -05006054static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6055{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006056 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006057 int i = bnapi->index;
6058
Michael Chan3b2b7d92016-01-02 23:45:00 -05006059 if (!txr)
6060 return;
6061
Michael Chan9f554592016-01-02 23:44:58 -05006062 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6063 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6064 txr->tx_cons);
6065}
6066
6067static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6068{
Michael Chanb6ab4b02016-01-02 23:44:59 -05006069 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05006070 int i = bnapi->index;
6071
Michael Chan3b2b7d92016-01-02 23:45:00 -05006072 if (!rxr)
6073 return;
6074
Michael Chan9f554592016-01-02 23:44:58 -05006075 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6076 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6077 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6078 rxr->rx_sw_agg_prod);
6079}
6080
6081static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6082{
6083 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6084 int i = bnapi->index;
6085
6086 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6087 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6088}
6089
Michael Chanc0c050c2015-10-22 16:01:17 -04006090static void bnxt_dbg_dump_states(struct bnxt *bp)
6091{
6092 int i;
6093 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04006094
6095 for (i = 0; i < bp->cp_nr_rings; i++) {
6096 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04006097 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05006098 bnxt_dump_tx_sw_state(bnapi);
6099 bnxt_dump_rx_sw_state(bnapi);
6100 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04006101 }
6102 }
6103}
6104
Michael Chan6988bd92016-06-13 02:25:29 -04006105static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04006106{
Michael Chan6988bd92016-06-13 02:25:29 -04006107 if (!silent)
6108 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05006109 if (netif_running(bp->dev)) {
6110 bnxt_close_nic(bp, false, false);
6111 bnxt_open_nic(bp, false, false);
6112 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006113}
6114
6115static void bnxt_tx_timeout(struct net_device *dev)
6116{
6117 struct bnxt *bp = netdev_priv(dev);
6118
6119 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6120 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6121 schedule_work(&bp->sp_task);
6122}
6123
6124#ifdef CONFIG_NET_POLL_CONTROLLER
6125static void bnxt_poll_controller(struct net_device *dev)
6126{
6127 struct bnxt *bp = netdev_priv(dev);
6128 int i;
6129
6130 for (i = 0; i < bp->cp_nr_rings; i++) {
6131 struct bnxt_irq *irq = &bp->irq_tbl[i];
6132
6133 disable_irq(irq->vector);
6134 irq->handler(irq->vector, bp->bnapi[i]);
6135 enable_irq(irq->vector);
6136 }
6137}
6138#endif
6139
6140static void bnxt_timer(unsigned long data)
6141{
6142 struct bnxt *bp = (struct bnxt *)data;
6143 struct net_device *dev = bp->dev;
6144
6145 if (!netif_running(dev))
6146 return;
6147
6148 if (atomic_read(&bp->intr_sem) != 0)
6149 goto bnxt_restart_timer;
6150
Michael Chan3bdf56c2016-03-07 15:38:45 -05006151 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6152 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6153 schedule_work(&bp->sp_task);
6154 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006155bnxt_restart_timer:
6156 mod_timer(&bp->timer, jiffies + bp->current_interval);
6157}
6158
Michael Chan6988bd92016-06-13 02:25:29 -04006159/* Only called from bnxt_sp_task() */
6160static void bnxt_reset(struct bnxt *bp, bool silent)
6161{
6162 /* bnxt_reset_task() calls bnxt_close_nic() which waits
6163 * for BNXT_STATE_IN_SP_TASK to clear.
6164 * If there is a parallel dev_close(), bnxt_close() may be holding
6165 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6166 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6167 */
6168 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6169 rtnl_lock();
6170 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6171 bnxt_reset_task(bp, silent);
6172 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6173 rtnl_unlock();
6174}
6175
Michael Chanc0c050c2015-10-22 16:01:17 -04006176static void bnxt_cfg_ntp_filters(struct bnxt *);
6177
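/* Slow path work function: handles RX mode updates, ntuple filter setup,
 * link and PHY capability changes, tunnel port add/free, resets, module
 * status checks and port statistics, all guarded by BNXT_STATE_IN_SP_TASK.
 */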
6178static void bnxt_sp_task(struct work_struct *work)
6179{
6180 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6181 int rc;
6182
Michael Chan4cebdce2015-12-09 19:35:43 -05006183 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6184 smp_mb__after_atomic();
6185 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6186 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006187 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05006188 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006189
6190 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6191 bnxt_cfg_rx_mode(bp);
6192
6193 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6194 bnxt_cfg_ntp_filters(bp);
6195 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
Michael Chan286ef9d2016-11-16 21:13:08 -05006196 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6197 &bp->sp_event))
6198 bnxt_hwrm_phy_qcaps(bp);
6199
Michael Chanc0c050c2015-10-22 16:01:17 -04006200 rc = bnxt_update_link(bp, true);
6201 if (rc)
6202 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6203 rc);
6204 }
6205 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6206 bnxt_hwrm_exec_fwd_req(bp);
6207 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6208 bnxt_hwrm_tunnel_dst_port_alloc(
6209 bp, bp->vxlan_port,
6210 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6211 }
6212 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6213 bnxt_hwrm_tunnel_dst_port_free(
6214 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6215 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006216 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6217 bnxt_hwrm_tunnel_dst_port_alloc(
6218 bp, bp->nge_port,
6219 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6220 }
6221 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6222 bnxt_hwrm_tunnel_dst_port_free(
6223 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6224 }
Michael Chan6988bd92016-06-13 02:25:29 -04006225 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6226 bnxt_reset(bp, false);
Michael Chan4cebdce2015-12-09 19:35:43 -05006227
Michael Chanfc0f1922016-06-13 02:25:30 -04006228 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6229 bnxt_reset(bp, true);
6230
Michael Chan4bb13ab2016-04-05 14:09:01 -04006231 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
Michael Chan10289be2016-05-15 03:04:49 -04006232 bnxt_get_port_module_status(bp);
Michael Chan4bb13ab2016-04-05 14:09:01 -04006233
Michael Chan3bdf56c2016-03-07 15:38:45 -05006234 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6235 bnxt_hwrm_port_qstats(bp);
6236
Michael Chan4cebdce2015-12-09 19:35:43 -05006237 smp_mb__before_atomic();
6238 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006239}
6240
6241static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6242{
6243 int rc;
6244 struct bnxt *bp = netdev_priv(dev);
6245
6246 SET_NETDEV_DEV(dev, &pdev->dev);
6247
6248 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6249 rc = pci_enable_device(pdev);
6250 if (rc) {
6251 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6252 goto init_err;
6253 }
6254
6255 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6256 dev_err(&pdev->dev,
6257 "Cannot find PCI device base address, aborting\n");
6258 rc = -ENODEV;
6259 goto init_err_disable;
6260 }
6261
6262 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6263 if (rc) {
6264 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
6265 goto init_err_disable;
6266 }
6267
6268 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
6269 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6270 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
6271 goto init_err_disable;
6272 }
6273
6274 pci_set_master(pdev);
6275
6276 bp->dev = dev;
6277 bp->pdev = pdev;
6278
6279 bp->bar0 = pci_ioremap_bar(pdev, 0);
6280 if (!bp->bar0) {
6281 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
6282 rc = -ENOMEM;
6283 goto init_err_release;
6284 }
6285
6286 bp->bar1 = pci_ioremap_bar(pdev, 2);
6287 if (!bp->bar1) {
6288 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
6289 rc = -ENOMEM;
6290 goto init_err_release;
6291 }
6292
6293 bp->bar2 = pci_ioremap_bar(pdev, 4);
6294 if (!bp->bar2) {
6295 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
6296 rc = -ENOMEM;
6297 goto init_err_release;
6298 }
6299
Satish Baddipadige6316ea62016-03-07 15:38:48 -05006300 pci_enable_pcie_error_reporting(pdev);
6301
Michael Chanc0c050c2015-10-22 16:01:17 -04006302 INIT_WORK(&bp->sp_task, bnxt_sp_task);
6303
6304 spin_lock_init(&bp->ntp_fltr_lock);
6305
6306 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
6307 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
6308
Michael Chandfb5b892016-02-26 04:00:01 -05006309 /* tick values in microseconds */
Michael Chandfc9c942016-02-26 04:00:03 -05006310 bp->rx_coal_ticks = 12;
6311 bp->rx_coal_bufs = 30;
Michael Chandfb5b892016-02-26 04:00:01 -05006312 bp->rx_coal_ticks_irq = 1;
6313 bp->rx_coal_bufs_irq = 2;
Michael Chanc0c050c2015-10-22 16:01:17 -04006314
Michael Chandfc9c942016-02-26 04:00:03 -05006315 bp->tx_coal_ticks = 25;
6316 bp->tx_coal_bufs = 30;
6317 bp->tx_coal_ticks_irq = 2;
6318 bp->tx_coal_bufs_irq = 2;
6319
Michael Chan51f30782016-07-01 18:46:29 -04006320 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
6321
Michael Chanc0c050c2015-10-22 16:01:17 -04006322 init_timer(&bp->timer);
6323 bp->timer.data = (unsigned long)bp;
6324 bp->timer.function = bnxt_timer;
6325 bp->current_interval = BNXT_TIMER_INTERVAL;
6326
Michael Chancaefe522015-12-09 19:35:42 -05006327 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04006328
6329 return 0;
6330
6331init_err_release:
6332 if (bp->bar2) {
6333 pci_iounmap(pdev, bp->bar2);
6334 bp->bar2 = NULL;
6335 }
6336
6337 if (bp->bar1) {
6338 pci_iounmap(pdev, bp->bar1);
6339 bp->bar1 = NULL;
6340 }
6341
6342 if (bp->bar0) {
6343 pci_iounmap(pdev, bp->bar0);
6344 bp->bar0 = NULL;
6345 }
6346
6347 pci_release_regions(pdev);
6348
6349init_err_disable:
6350 pci_disable_device(pdev);
6351
6352init_err:
6353 return rc;
6354}
6355
6356/* rtnl_lock held */
6357static int bnxt_change_mac_addr(struct net_device *dev, void *p)
6358{
6359 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006360 struct bnxt *bp = netdev_priv(dev);
6361 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006362
6363 if (!is_valid_ether_addr(addr->sa_data))
6364 return -EADDRNOTAVAIL;
6365
Michael Chan84c33dd2016-04-11 04:11:13 -04006366 rc = bnxt_approve_mac(bp, addr->sa_data);
6367 if (rc)
6368 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006369
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006370 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
6371 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006372
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05006373 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6374 if (netif_running(dev)) {
6375 bnxt_close_nic(bp, false, false);
6376 rc = bnxt_open_nic(bp, false, false);
6377 }
6378
6379 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006380}
6381
6382/* rtnl_lock held */
6383static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
6384{
6385 struct bnxt *bp = netdev_priv(dev);
6386
Michael Chanc0c050c2015-10-22 16:01:17 -04006387 if (netif_running(dev))
6388 bnxt_close_nic(bp, false, false);
6389
6390 dev->mtu = new_mtu;
6391 bnxt_set_ring_params(bp);
6392
6393 if (netif_running(dev))
6394 return bnxt_open_nic(bp, false, false);
6395
6396 return 0;
6397}
6398
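/* Configure the number of TX traffic classes: validate against max_tc,
 * recompute the TX/completion ring counts and reopen the NIC if it is up.
 */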
Michael Chanc5e3deb2016-12-02 21:17:15 -05006399int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
Michael Chanc0c050c2015-10-22 16:01:17 -04006400{
6401 struct bnxt *bp = netdev_priv(dev);
Michael Chan3ffb6a32016-11-11 00:11:42 -05006402 bool sh = false;
John Fastabend16e5cc62016-02-16 21:16:43 -08006403
Michael Chanc0c050c2015-10-22 16:01:17 -04006404 if (tc > bp->max_tc) {
6405 netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
6406 tc, bp->max_tc);
6407 return -EINVAL;
6408 }
6409
6410 if (netdev_get_num_tc(dev) == tc)
6411 return 0;
6412
Michael Chan3ffb6a32016-11-11 00:11:42 -05006413 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6414 sh = true;
6415
Michael Chanc0c050c2015-10-22 16:01:17 -04006416 if (tc) {
Michael Chan6e6c5a52016-01-02 23:45:02 -05006417 int max_rx_rings, max_tx_rings, rc;
Michael Chan01657bc2016-01-02 23:45:03 -05006418
6419 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
Michael Chan6e6c5a52016-01-02 23:45:02 -05006420 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
Michael Chanc0c050c2015-10-22 16:01:17 -04006421 return -ENOMEM;
6422 }
6423
6424 /* Needs to close the device and do hw resource re-allocations */
6425 if (netif_running(bp->dev))
6426 bnxt_close_nic(bp, true, false);
6427
6428 if (tc) {
6429 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
6430 netdev_set_num_tc(dev, tc);
6431 } else {
6432 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6433 netdev_reset_tc(dev);
6434 }
Michael Chan3ffb6a32016-11-11 00:11:42 -05006435 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6436 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04006437 bp->num_stat_ctxs = bp->cp_nr_rings;
6438
6439 if (netif_running(bp->dev))
6440 return bnxt_open_nic(bp, true, false);
6441
6442 return 0;
6443}
6444
Michael Chanc5e3deb2016-12-02 21:17:15 -05006445static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6446 struct tc_to_netdev *ntc)
6447{
6448 if (ntc->type != TC_SETUP_MQPRIO)
6449 return -EINVAL;
6450
6451 return bnxt_setup_mq_tc(dev, ntc->tc);
6452}
6453
Michael Chanc0c050c2015-10-22 16:01:17 -04006454#ifdef CONFIG_RFS_ACCEL
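/* Return true if two ntuple filters describe the same flow: addresses,
 * ports, protocol and MAC addresses must all match.
 */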
6455static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
6456 struct bnxt_ntuple_filter *f2)
6457{
6458 struct flow_keys *keys1 = &f1->fkeys;
6459 struct flow_keys *keys2 = &f2->fkeys;
6460
6461 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
6462 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
6463 keys1->ports.ports == keys2->ports.ports &&
6464 keys1->basic.ip_proto == keys2->basic.ip_proto &&
6465 keys1->basic.n_proto == keys2->basic.n_proto &&
Michael Chana54c4d72016-07-25 12:33:35 -04006466 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
6467 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
Michael Chanc0c050c2015-10-22 16:01:17 -04006468 return true;
6469
6470 return false;
6471}
6472
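/* ndo_rx_flow_steer handler for accelerated RFS. Only non-tunneled IPv4
 * TCP/UDP flows are supported; a new ntuple filter is queued for the
 * sp_task to program into the hardware.
 */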
6473static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
6474 u16 rxq_index, u32 flow_id)
6475{
6476 struct bnxt *bp = netdev_priv(dev);
6477 struct bnxt_ntuple_filter *fltr, *new_fltr;
6478 struct flow_keys *fkeys;
6479 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chana54c4d72016-07-25 12:33:35 -04006480 int rc = 0, idx, bit_id, l2_idx = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006481 struct hlist_head *head;
6482
6483 if (skb->encapsulation)
6484 return -EPROTONOSUPPORT;
6485
Michael Chana54c4d72016-07-25 12:33:35 -04006486 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
6487 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6488 int off = 0, j;
6489
6490 netif_addr_lock_bh(dev);
6491 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
6492 if (ether_addr_equal(eth->h_dest,
6493 vnic->uc_list + off)) {
6494 l2_idx = j + 1;
6495 break;
6496 }
6497 }
6498 netif_addr_unlock_bh(dev);
6499 if (!l2_idx)
6500 return -EINVAL;
6501 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006502 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
6503 if (!new_fltr)
6504 return -ENOMEM;
6505
6506 fkeys = &new_fltr->fkeys;
6507 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
6508 rc = -EPROTONOSUPPORT;
6509 goto err_free;
6510 }
6511
6512 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
6513 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
6514 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
6515 rc = -EPROTONOSUPPORT;
6516 goto err_free;
6517 }
6518
Michael Chana54c4d72016-07-25 12:33:35 -04006519 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04006520 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
6521
6522 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
6523 head = &bp->ntp_fltr_hash_tbl[idx];
6524 rcu_read_lock();
6525 hlist_for_each_entry_rcu(fltr, head, hash) {
6526 if (bnxt_fltr_match(fltr, new_fltr)) {
6527 rcu_read_unlock();
6528 rc = 0;
6529 goto err_free;
6530 }
6531 }
6532 rcu_read_unlock();
6533
6534 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05006535 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6536 BNXT_NTP_FLTR_MAX_FLTR, 0);
6537 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006538 spin_unlock_bh(&bp->ntp_fltr_lock);
6539 rc = -ENOMEM;
6540 goto err_free;
6541 }
6542
Michael Chan84e86b92015-11-05 16:25:50 -05006543 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04006544 new_fltr->flow_id = flow_id;
Michael Chana54c4d72016-07-25 12:33:35 -04006545 new_fltr->l2_fltr_idx = l2_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04006546 new_fltr->rxq = rxq_index;
6547 hlist_add_head_rcu(&new_fltr->hash, head);
6548 bp->ntp_fltr_count++;
6549 spin_unlock_bh(&bp->ntp_fltr_lock);
6550
6551 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
6552 schedule_work(&bp->sp_task);
6553
6554 return new_fltr->sw_id;
6555
6556err_free:
6557 kfree(new_fltr);
6558 return rc;
6559}
6560
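/* Ntuple filter maintenance, run from the sp_task: program newly added
 * filters and free filters whose flows have expired according to
 * rps_may_expire_flow().
 */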
6561static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6562{
6563 int i;
6564
6565 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
6566 struct hlist_head *head;
6567 struct hlist_node *tmp;
6568 struct bnxt_ntuple_filter *fltr;
6569 int rc;
6570
6571 head = &bp->ntp_fltr_hash_tbl[i];
6572 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
6573 bool del = false;
6574
6575 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
6576 if (rps_may_expire_flow(bp->dev, fltr->rxq,
6577 fltr->flow_id,
6578 fltr->sw_id)) {
6579 bnxt_hwrm_cfa_ntuple_filter_free(bp,
6580 fltr);
6581 del = true;
6582 }
6583 } else {
6584 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
6585 fltr);
6586 if (rc)
6587 del = true;
6588 else
6589 set_bit(BNXT_FLTR_VALID, &fltr->state);
6590 }
6591
6592 if (del) {
6593 spin_lock_bh(&bp->ntp_fltr_lock);
6594 hlist_del_rcu(&fltr->hash);
6595 bp->ntp_fltr_count--;
6596 spin_unlock_bh(&bp->ntp_fltr_lock);
6597 synchronize_rcu();
6598 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
6599 kfree(fltr);
6600 }
6601 }
6602 }
Jeffrey Huang19241362016-02-26 04:00:00 -05006603 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
6604		netdev_info(bp->dev, "Receive PF driver unload event!\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04006605}
6606
6607#else
6608
6609static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6610{
6611}
6612
6613#endif /* CONFIG_RFS_ACCEL */
6614
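/* ndo_udp_tunnel_add: record the offloaded VXLAN/GENEVE UDP port and
 * schedule the sp_task to program it into the firmware.
 */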
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006615static void bnxt_udp_tunnel_add(struct net_device *dev,
6616 struct udp_tunnel_info *ti)
Michael Chanc0c050c2015-10-22 16:01:17 -04006617{
6618 struct bnxt *bp = netdev_priv(dev);
6619
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006620 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6621 return;
6622
Michael Chanc0c050c2015-10-22 16:01:17 -04006623 if (!netif_running(dev))
6624 return;
6625
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006626 switch (ti->type) {
6627 case UDP_TUNNEL_TYPE_VXLAN:
6628 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
6629 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04006630
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006631 bp->vxlan_port_cnt++;
6632 if (bp->vxlan_port_cnt == 1) {
6633 bp->vxlan_port = ti->port;
6634 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
Michael Chanc0c050c2015-10-22 16:01:17 -04006635 schedule_work(&bp->sp_task);
6636 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006637 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006638 case UDP_TUNNEL_TYPE_GENEVE:
6639 if (bp->nge_port_cnt && bp->nge_port != ti->port)
6640 return;
6641
6642 bp->nge_port_cnt++;
6643 if (bp->nge_port_cnt == 1) {
6644 bp->nge_port = ti->port;
6645 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
6646 }
6647 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006648 default:
6649 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04006650 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006651
6652 schedule_work(&bp->sp_task);
6653}
6654
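/* ndo_udp_tunnel_del: drop the port reference and, once the last user is
 * gone, schedule the sp_task to remove the port from the firmware.
 */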
6655static void bnxt_udp_tunnel_del(struct net_device *dev,
6656 struct udp_tunnel_info *ti)
6657{
6658 struct bnxt *bp = netdev_priv(dev);
6659
6660 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6661 return;
6662
6663 if (!netif_running(dev))
6664 return;
6665
6666 switch (ti->type) {
6667 case UDP_TUNNEL_TYPE_VXLAN:
6668 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
6669 return;
6670 bp->vxlan_port_cnt--;
6671
6672 if (bp->vxlan_port_cnt != 0)
6673 return;
6674
6675 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
6676 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07006677 case UDP_TUNNEL_TYPE_GENEVE:
6678 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
6679 return;
6680 bp->nge_port_cnt--;
6681
6682 if (bp->nge_port_cnt != 0)
6683 return;
6684
6685 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
6686 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006687 default:
6688 return;
6689 }
6690
6691 schedule_work(&bp->sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04006692}
6693
6694static const struct net_device_ops bnxt_netdev_ops = {
6695 .ndo_open = bnxt_open,
6696 .ndo_start_xmit = bnxt_start_xmit,
6697 .ndo_stop = bnxt_close,
6698 .ndo_get_stats64 = bnxt_get_stats64,
6699 .ndo_set_rx_mode = bnxt_set_rx_mode,
6700 .ndo_do_ioctl = bnxt_ioctl,
6701 .ndo_validate_addr = eth_validate_addr,
6702 .ndo_set_mac_address = bnxt_change_mac_addr,
6703 .ndo_change_mtu = bnxt_change_mtu,
6704 .ndo_fix_features = bnxt_fix_features,
6705 .ndo_set_features = bnxt_set_features,
6706 .ndo_tx_timeout = bnxt_tx_timeout,
6707#ifdef CONFIG_BNXT_SRIOV
6708 .ndo_get_vf_config = bnxt_get_vf_config,
6709 .ndo_set_vf_mac = bnxt_set_vf_mac,
6710 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
6711 .ndo_set_vf_rate = bnxt_set_vf_bw,
6712 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
6713 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
6714#endif
6715#ifdef CONFIG_NET_POLL_CONTROLLER
6716 .ndo_poll_controller = bnxt_poll_controller,
6717#endif
6718 .ndo_setup_tc = bnxt_setup_tc,
6719#ifdef CONFIG_RFS_ACCEL
6720 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
6721#endif
Alexander Duyckad51b8e2016-06-16 12:21:19 -07006722 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
6723 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
Michael Chanc0c050c2015-10-22 16:01:17 -04006724};
6725
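/* PCI remove callback: disable SR-IOV, unregister the netdev and release
 * all resources acquired in bnxt_init_one().
 */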
6726static void bnxt_remove_one(struct pci_dev *pdev)
6727{
6728 struct net_device *dev = pci_get_drvdata(pdev);
6729 struct bnxt *bp = netdev_priv(dev);
6730
6731 if (BNXT_PF(bp))
6732 bnxt_sriov_disable(bp);
6733
Satish Baddipadige6316ea62016-03-07 15:38:48 -05006734 pci_disable_pcie_error_reporting(pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04006735 unregister_netdev(dev);
6736 cancel_work_sync(&bp->sp_task);
6737 bp->sp_event = 0;
6738
Michael Chan78095922016-12-07 00:26:16 -05006739 bnxt_clear_int_mode(bp);
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05006740 bnxt_hwrm_func_drv_unrgtr(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006741 bnxt_free_hwrm_resources(bp);
Michael Chan7df4ae92016-12-02 21:17:17 -05006742 bnxt_dcb_free(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006743 pci_iounmap(pdev, bp->bar2);
6744 pci_iounmap(pdev, bp->bar1);
6745 pci_iounmap(pdev, bp->bar0);
Michael Chana588e452016-12-07 00:26:21 -05006746 kfree(bp->edev);
6747 bp->edev = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04006748 free_netdev(dev);
6749
6750 pci_release_regions(pdev);
6751 pci_disable_device(pdev);
6752}
6753
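/* Query PHY capabilities and the current link state, then seed the ethtool
 * link settings (autoneg, speeds, flow control) from the NVM configuration.
 */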
6754static int bnxt_probe_phy(struct bnxt *bp)
6755{
6756 int rc = 0;
6757 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chanc0c050c2015-10-22 16:01:17 -04006758
Michael Chan170ce012016-04-05 14:08:57 -04006759 rc = bnxt_hwrm_phy_qcaps(bp);
6760 if (rc) {
6761 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
6762 rc);
6763 return rc;
6764 }
6765
Michael Chanc0c050c2015-10-22 16:01:17 -04006766 rc = bnxt_update_link(bp, false);
6767 if (rc) {
6768 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
6769 rc);
6770 return rc;
6771 }
6772
Michael Chan93ed8112016-06-13 02:25:37 -04006773 /* Older firmware does not have supported_auto_speeds, so assume
6774 * that all supported speeds can be autonegotiated.
6775 */
6776 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
6777 link_info->support_auto_speeds = link_info->support_speeds;
6778
Michael Chanc0c050c2015-10-22 16:01:17 -04006779	/* Initialize the ethtool settings copy with NVM settings */
Michael Chan0d8abf02016-02-10 17:33:47 -05006780 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
Michael Chanc9ee9512016-04-05 14:08:56 -04006781 link_info->autoneg = BNXT_AUTONEG_SPEED;
6782 if (bp->hwrm_spec_code >= 0x10201) {
6783 if (link_info->auto_pause_setting &
6784 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
6785 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6786 } else {
6787 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6788 }
Michael Chan0d8abf02016-02-10 17:33:47 -05006789 link_info->advertising = link_info->auto_link_speeds;
Michael Chan0d8abf02016-02-10 17:33:47 -05006790 } else {
6791 link_info->req_link_speed = link_info->force_link_speed;
6792 link_info->req_duplex = link_info->duplex_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04006793 }
Michael Chanc9ee9512016-04-05 14:08:56 -04006794 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
6795 link_info->req_flow_ctrl =
6796 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
6797 else
6798 link_info->req_flow_ctrl = link_info->force_pause_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04006799 return rc;
6800}
6801
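/* Return the number of MSI-X vectors the device advertises, or 1 if MSI-X
 * is not supported.
 */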
6802static int bnxt_get_max_irq(struct pci_dev *pdev)
6803{
6804 u16 ctrl;
6805
6806 if (!pdev->msix_cap)
6807 return 1;
6808
6809 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
6810 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
6811}
6812
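/* Report the maximum RX, TX and completion rings available to this
 * function, accounting for PF/VF limits, ring groups and Nitro A0
 * reservations.
 */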
Michael Chan6e6c5a52016-01-02 23:45:02 -05006813static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6814 int *max_cp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006815{
Michael Chan6e6c5a52016-01-02 23:45:02 -05006816 int max_ring_grps = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04006817
Michael Chan379a80a2015-10-23 15:06:19 -04006818#ifdef CONFIG_BNXT_SRIOV
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006819 if (!BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04006820 *max_tx = bp->vf.max_tx_rings;
6821 *max_rx = bp->vf.max_rx_rings;
Michael Chan6e6c5a52016-01-02 23:45:02 -05006822 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
6823 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
Michael Chanb72d4a62015-12-27 18:19:27 -05006824 max_ring_grps = bp->vf.max_hw_ring_grps;
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006825 } else
Michael Chan379a80a2015-10-23 15:06:19 -04006826#endif
Arnd Bergmann415b6f12016-01-12 16:05:08 +01006827 {
6828 *max_tx = bp->pf.max_tx_rings;
6829 *max_rx = bp->pf.max_rx_rings;
6830 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
6831 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
6832 max_ring_grps = bp->pf.max_hw_ring_grps;
Michael Chanc0c050c2015-10-22 16:01:17 -04006833 }
Prashant Sreedharan76595192016-07-18 07:15:22 -04006834 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
6835 *max_cp -= 1;
6836 *max_rx -= 2;
6837 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006838 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6839 *max_rx >>= 1;
Michael Chanb72d4a62015-12-27 18:19:27 -05006840 *max_rx = min_t(int, *max_rx, max_ring_grps);
Michael Chan6e6c5a52016-01-02 23:45:02 -05006841}
6842
6843int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
6844{
6845 int rx, tx, cp;
6846
6847 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
6848 if (!rx || !tx || !cp)
6849 return -ENOMEM;
6850
6851 *max_rx = rx;
6852 *max_tx = tx;
6853 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
6854}
6855
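/* Like bnxt_get_max_rings(), but holds back minimum completion rings, stat
 * contexts and IRQs for RoCE when the device supports it.
 */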
Michael Chane4060d32016-12-07 00:26:19 -05006856static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6857 bool shared)
6858{
6859 int rc;
6860
6861 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
6862 if (rc)
6863 return rc;
6864
6865 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6866 int max_cp, max_stat, max_irq;
6867
6868 /* Reserve minimum resources for RoCE */
6869 max_cp = bnxt_get_max_func_cp_rings(bp);
6870 max_stat = bnxt_get_max_func_stat_ctxs(bp);
6871 max_irq = bnxt_get_max_func_irqs(bp);
6872 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
6873 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
6874 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
6875 return 0;
6876
6877 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
6878 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
6879 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
6880 max_cp = min_t(int, max_cp, max_irq);
6881 max_cp = min_t(int, max_cp, max_stat);
6882 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
6883 if (rc)
6884 rc = 0;
6885 }
6886 return rc;
6887}
6888
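/* Pick default ring counts from the default RSS queue count, capped by the
 * hardware maximums reported by bnxt_get_dflt_rings().
 */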
Michael Chan6e6c5a52016-01-02 23:45:02 -05006889static int bnxt_set_dflt_rings(struct bnxt *bp)
6890{
6891 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6892 bool sh = true;
6893
6894 if (sh)
6895 bp->flags |= BNXT_FLAG_SHARED_RINGS;
6896 dflt_rings = netif_get_num_default_rss_queues();
Michael Chane4060d32016-12-07 00:26:19 -05006897 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
Michael Chan6e6c5a52016-01-02 23:45:02 -05006898 if (rc)
6899 return rc;
6900 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6901 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6902 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6903 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6904 bp->tx_nr_rings + bp->rx_nr_rings;
6905 bp->num_stat_ctxs = bp->cp_nr_rings;
Prashant Sreedharan76595192016-07-18 07:15:22 -04006906 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6907 bp->rx_nr_rings++;
6908 bp->cp_nr_rings++;
6909 }
Michael Chan6e6c5a52016-01-02 23:45:02 -05006910 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04006911}
6912
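/* Re-read function resource limits from firmware and subtract the resources
 * already claimed by the RoCE ULP. Caller must hold rtnl_lock.
 */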
Michael Chan7b08f662016-12-07 00:26:18 -05006913void bnxt_restore_pf_fw_resources(struct bnxt *bp)
6914{
6915 ASSERT_RTNL();
6916 bnxt_hwrm_func_qcaps(bp);
Michael Chana588e452016-12-07 00:26:21 -05006917 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
Michael Chan7b08f662016-12-07 00:26:18 -05006918}
6919
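/* Log the negotiated PCIe link speed and width for this device. */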
Ajit Khaparde90c4f782016-05-15 03:04:45 -04006920static void bnxt_parse_log_pcie_link(struct bnxt *bp)
6921{
6922 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
6923 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
6924
6925 if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
6926 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
6927 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
6928 else
6929 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
6930 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
6931 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
6932 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
6933 "Unknown", width);
6934}
6935
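/* PCI probe callback: map BARs, query firmware capabilities, set up default
 * features and ring counts, then register the net device.
 */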
Michael Chanc0c050c2015-10-22 16:01:17 -04006936static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6937{
6938 static int version_printed;
6939 struct net_device *dev;
6940 struct bnxt *bp;
Michael Chan6e6c5a52016-01-02 23:45:02 -05006941 int rc, max_irqs;
Michael Chanc0c050c2015-10-22 16:01:17 -04006942
Prashant Sreedharanfa853dd2016-07-18 07:15:25 -04006943 if (pdev->device == 0x16cd && pci_is_bridge(pdev))
6944 return -ENODEV;
6945
Michael Chanc0c050c2015-10-22 16:01:17 -04006946 if (version_printed++ == 0)
6947 pr_info("%s", version);
6948
6949 max_irqs = bnxt_get_max_irq(pdev);
6950 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6951 if (!dev)
6952 return -ENOMEM;
6953
6954 bp = netdev_priv(dev);
6955
6956 if (bnxt_vf_pciid(ent->driver_data))
6957 bp->flags |= BNXT_FLAG_VF;
6958
Michael Chan2bcfa6f2015-12-27 18:19:24 -05006959 if (pdev->msix_cap)
Michael Chanc0c050c2015-10-22 16:01:17 -04006960 bp->flags |= BNXT_FLAG_MSIX_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -04006961
6962 rc = bnxt_init_board(pdev, dev);
6963 if (rc < 0)
6964 goto init_err_free;
6965
6966 dev->netdev_ops = &bnxt_netdev_ops;
6967 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6968 dev->ethtool_ops = &bnxt_ethtool_ops;
6969
6970 pci_set_drvdata(pdev, dev);
6971
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006972 rc = bnxt_alloc_hwrm_resources(bp);
6973 if (rc)
6974 goto init_err;
6975
6976 mutex_init(&bp->hwrm_cmd_lock);
6977 rc = bnxt_hwrm_ver_get(bp);
6978 if (rc)
6979 goto init_err;
6980
Rob Swindell5ac67d82016-09-19 03:58:03 -04006981 bnxt_hwrm_fw_set_time(bp);
6982
Michael Chanc0c050c2015-10-22 16:01:17 -04006983 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6984 NETIF_F_TSO | NETIF_F_TSO6 |
6985 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Tom Herbert7e133182016-05-18 09:06:10 -07006986 NETIF_F_GSO_IPXIP4 |
Alexander Duyck152971e2016-05-02 09:38:55 -07006987 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
6988 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006989 NETIF_F_RXCSUM | NETIF_F_GRO;
6990
6991 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6992 dev->hw_features |= NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -04006993
Michael Chanc0c050c2015-10-22 16:01:17 -04006994 dev->hw_enc_features =
6995 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6996 NETIF_F_TSO | NETIF_F_TSO6 |
6997 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Alexander Duyck152971e2016-05-02 09:38:55 -07006998 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07006999 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
Alexander Duyck152971e2016-05-02 09:38:55 -07007000 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
7001 NETIF_F_GSO_GRE_CSUM;
Michael Chanc0c050c2015-10-22 16:01:17 -04007002 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
7003 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7004 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
7005 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
7006 dev->priv_flags |= IFF_UNICAST_FLT;
7007
Jarod Wilsone1c6dcc2016-10-17 15:54:04 -04007008 /* MTU range: 60 - 9500 */
7009 dev->min_mtu = ETH_ZLEN;
7010 dev->max_mtu = 9500;
7011
Michael Chan7df4ae92016-12-02 21:17:17 -05007012 bnxt_dcb_init(bp);
7013
Michael Chanc0c050c2015-10-22 16:01:17 -04007014#ifdef CONFIG_BNXT_SRIOV
7015 init_waitqueue_head(&bp->sriov_cfg_wait);
7016#endif
Michael Chan309369c2016-06-13 02:25:34 -04007017 bp->gro_func = bnxt_gro_func_5730x;
Michael Chan94758f82016-06-13 02:25:35 -04007018 if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
7019 bp->gro_func = bnxt_gro_func_5731x;
Michael Chan309369c2016-06-13 02:25:34 -04007020
Michael Chanc0c050c2015-10-22 16:01:17 -04007021 rc = bnxt_hwrm_func_drv_rgtr(bp);
7022 if (rc)
7023 goto init_err;
7024
Michael Chana1653b12016-12-07 00:26:20 -05007025 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
7026 if (rc)
7027 goto init_err;
7028
Michael Chana588e452016-12-07 00:26:21 -05007029 bp->ulp_probe = bnxt_ulp_probe;
7030
Michael Chanc0c050c2015-10-22 16:01:17 -04007031 /* Get the MAX capabilities for this function */
7032 rc = bnxt_hwrm_func_qcaps(bp);
7033 if (rc) {
7034 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
7035 rc);
7036 rc = -1;
7037 goto init_err;
7038 }
7039
7040 rc = bnxt_hwrm_queue_qportcfg(bp);
7041 if (rc) {
7042 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
7043 rc);
7044 rc = -1;
7045 goto init_err;
7046 }
7047
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04007048 bnxt_hwrm_func_qcfg(bp);
7049
Michael Chanc0c050c2015-10-22 16:01:17 -04007050 bnxt_set_tpa_flags(bp);
7051 bnxt_set_ring_params(bp);
Michael Chan33c26572016-12-07 00:26:15 -05007052 bnxt_set_max_func_irqs(bp, max_irqs);
Michael Chan6e6c5a52016-01-02 23:45:02 -05007053 bnxt_set_dflt_rings(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007054
Michael Chan87da7f72016-11-16 21:13:09 -05007055 /* Default RSS hash cfg. */
7056 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
7057 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
7058 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
7059 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
7060 if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
7061 !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
7062 bp->hwrm_spec_code >= 0x10501) {
7063 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
7064 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
7065 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
7066 }
7067
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04007068 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
Michael Chan2bcfa6f2015-12-27 18:19:24 -05007069 dev->hw_features |= NETIF_F_NTUPLE;
7070 if (bnxt_rfs_capable(bp)) {
7071 bp->flags |= BNXT_FLAG_RFS;
7072 dev->features |= NETIF_F_NTUPLE;
7073 }
7074 }
7075
Michael Chanc0c050c2015-10-22 16:01:17 -04007076 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
7077 bp->flags |= BNXT_FLAG_STRIP_VLAN;
7078
7079 rc = bnxt_probe_phy(bp);
7080 if (rc)
7081 goto init_err;
7082
Michael Chanaa8ed022016-12-07 00:26:17 -05007083 rc = bnxt_hwrm_func_reset(bp);
7084 if (rc)
7085 goto init_err;
7086
Michael Chan78095922016-12-07 00:26:16 -05007087 rc = bnxt_init_int_mode(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007088 if (rc)
7089 goto init_err;
7090
Michael Chan78095922016-12-07 00:26:16 -05007091 rc = register_netdev(dev);
7092 if (rc)
7093 goto init_err_clr_int;
7094
Michael Chanc0c050c2015-10-22 16:01:17 -04007095 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
7096 board_info[ent->driver_data].name,
7097 (long)pci_resource_start(pdev, 0), dev->dev_addr);
7098
Ajit Khaparde90c4f782016-05-15 03:04:45 -04007099 bnxt_parse_log_pcie_link(bp);
7100
Michael Chanc0c050c2015-10-22 16:01:17 -04007101 return 0;
7102
Michael Chan78095922016-12-07 00:26:16 -05007103init_err_clr_int:
7104 bnxt_clear_int_mode(bp);
7105
Michael Chanc0c050c2015-10-22 16:01:17 -04007106init_err:
7107 pci_iounmap(pdev, bp->bar0);
7108 pci_release_regions(pdev);
7109 pci_disable_device(pdev);
7110
7111init_err_free:
7112 free_netdev(dev);
7113 return rc;
7114}
7115
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007116/**
7117 * bnxt_io_error_detected - called when PCI error is detected
7118 * @pdev: Pointer to PCI device
7119 * @state: The current pci connection state
7120 *
7121 * This function is called after a PCI bus error affecting
7122 * this device has been detected.
7123 */
7124static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7125 pci_channel_state_t state)
7126{
7127 struct net_device *netdev = pci_get_drvdata(pdev);
Michael Chana588e452016-12-07 00:26:21 -05007128 struct bnxt *bp = netdev_priv(netdev);
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007129
7130 netdev_info(netdev, "PCI I/O error detected\n");
7131
7132 rtnl_lock();
7133 netif_device_detach(netdev);
7134
Michael Chana588e452016-12-07 00:26:21 -05007135 bnxt_ulp_stop(bp);
7136
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007137 if (state == pci_channel_io_perm_failure) {
7138 rtnl_unlock();
7139 return PCI_ERS_RESULT_DISCONNECT;
7140 }
7141
7142 if (netif_running(netdev))
7143 bnxt_close(netdev);
7144
7145 pci_disable_device(pdev);
7146 rtnl_unlock();
7147
7148	/* Request a slot reset. */
7149 return PCI_ERS_RESULT_NEED_RESET;
7150}
7151
7152/**
7153 * bnxt_io_slot_reset - called after the pci bus has been reset.
7154 * @pdev: Pointer to PCI device
7155 *
7156 * Restart the card from scratch, as if from a cold-boot.
7157 * At this point, the card has experienced a hard reset,
7158 * followed by fixups by BIOS, and has its config space
7159 * set up identically to what it was at cold boot.
7160 */
7161static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
7162{
7163 struct net_device *netdev = pci_get_drvdata(pdev);
7164 struct bnxt *bp = netdev_priv(netdev);
7165 int err = 0;
7166 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
7167
7168 netdev_info(bp->dev, "PCI Slot Reset\n");
7169
7170 rtnl_lock();
7171
7172 if (pci_enable_device(pdev)) {
7173 dev_err(&pdev->dev,
7174 "Cannot re-enable PCI device after reset.\n");
7175 } else {
7176 pci_set_master(pdev);
7177
Michael Chanaa8ed022016-12-07 00:26:17 -05007178 err = bnxt_hwrm_func_reset(bp);
7179 if (!err && netif_running(netdev))
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007180 err = bnxt_open(netdev);
7181
Michael Chana588e452016-12-07 00:26:21 -05007182 if (!err) {
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007183 result = PCI_ERS_RESULT_RECOVERED;
Michael Chana588e452016-12-07 00:26:21 -05007184 bnxt_ulp_start(bp);
7185 }
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007186 }
7187
7188 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
7189 dev_close(netdev);
7190
7191 rtnl_unlock();
7192
7193 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7194 if (err) {
7195 dev_err(&pdev->dev,
7196 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7197 err); /* non-fatal, continue */
7198 }
7199
7200 return PCI_ERS_RESULT_RECOVERED;
7201}
7202
7203/**
7204 * bnxt_io_resume - called when traffic can start flowing again.
7205 * @pdev: Pointer to PCI device
7206 *
7207 * This callback is called when the error recovery driver tells
7208 * us that it's OK to resume normal operation.
7209 */
7210static void bnxt_io_resume(struct pci_dev *pdev)
7211{
7212 struct net_device *netdev = pci_get_drvdata(pdev);
7213
7214 rtnl_lock();
7215
7216 netif_device_attach(netdev);
7217
7218 rtnl_unlock();
7219}
7220
7221static const struct pci_error_handlers bnxt_err_handler = {
7222 .error_detected = bnxt_io_error_detected,
7223 .slot_reset = bnxt_io_slot_reset,
7224 .resume = bnxt_io_resume
7225};
7226
Michael Chanc0c050c2015-10-22 16:01:17 -04007227static struct pci_driver bnxt_pci_driver = {
7228 .name = DRV_MODULE_NAME,
7229 .id_table = bnxt_pci_tbl,
7230 .probe = bnxt_init_one,
7231 .remove = bnxt_remove_one,
Satish Baddipadige6316ea62016-03-07 15:38:48 -05007232 .err_handler = &bnxt_err_handler,
Michael Chanc0c050c2015-10-22 16:01:17 -04007233#if defined(CONFIG_BNXT_SRIOV)
7234 .sriov_configure = bnxt_sriov_configure,
7235#endif
7236};
7237
7238module_pci_driver(bnxt_pci_driver);