blob: dcb930a52613f737067fd141ba2928daec967764 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
Wei Yongjune6053dd2016-09-25 15:40:36 +000047static struct workqueue_struct *be_err_recovery_workq;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053048
Benoit Taine9baa3c32014-08-08 15:56:03 +020049static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070050 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070051 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070052 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
53 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000054 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000055 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000056 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000057 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070058 { 0 }
59};
60MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
Wei Yongjune6053dd2016-09-25 15:40:36 +000063static struct workqueue_struct *be_wq;
Sathya Perlab7172412016-07-27 05:26:18 -040064
Ajit Khaparde7c185272010-07-29 06:16:33 +000065/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070066static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000067 "CEV",
68 "CTX",
69 "DBUF",
70 "ERX",
71 "Host",
72 "MPU",
73 "NDMA",
74 "PTC ",
75 "RDMA ",
76 "RXF ",
77 "RXIPS ",
78 "RXULP0 ",
79 "RXULP1 ",
80 "RXULP2 ",
81 "TIM ",
82 "TPOST ",
83 "TPRE ",
84 "TXIPS ",
85 "TXULP0 ",
86 "TXULP1 ",
87 "UC ",
88 "WDMA ",
89 "TXULP2 ",
90 "HOST1 ",
91 "P0_OB_LINK ",
92 "P1_OB_LINK ",
93 "HOST_GPIO ",
94 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053095 "ERX2 ",
96 "SPARE ",
97 "JTAG ",
98 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000099};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -0700102static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +0000103 "LPCMEMHOST",
104 "MGMT_MAC",
105 "PCS0ONLINE",
106 "MPU_IRAM",
107 "PCS1ONLINE",
108 "PCTL0",
109 "PCTL1",
110 "PMEM",
111 "RR",
112 "TXPB",
113 "RXPP",
114 "XAUI",
115 "TXP",
116 "ARM",
117 "IPC",
118 "HOST2",
119 "HOST3",
120 "HOST4",
121 "HOST5",
122 "HOST6",
123 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530124 "ECRC",
125 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700126 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530127 "PERIPH",
128 "LLTXULP",
129 "D2P",
130 "RCON",
131 "LDMA",
132 "LLTXP",
133 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000134 "Unknown"
135};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500137#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
138 BE_IF_FLAGS_BROADCAST | \
139 BE_IF_FLAGS_MULTICAST | \
140 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
Somnath Kotur68c45a22013-03-14 02:42:07 +0000169static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170{
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530174 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
176
Sathya Perla5f0b8492009-07-27 22:52:56 +0000177 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000179 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000181 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000183
Sathya Perladb3ea782011-08-22 19:41:52 +0000184 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
Sathya Perla8788fdc2009-07-27 22:52:03 +0000204static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205{
206 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530207
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530208 if (be_check_error(adapter, BE_ERROR_HW))
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 val |= qid & DB_RQ_RING_ID_MASK;
212 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000213
214 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000218static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
219 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220{
221 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530222
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530223 if (be_check_error(adapter, BE_ERROR_HW))
224 return;
225
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000226 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000228
229 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000230 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * processed event entries, optionally re-arm the EQ (@arm) and clear the
 * interrupt (@clear_int), and program the moderation delay encoding
 * (@eq_delay_mult_enc) into the same doorbell write.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	/* Ring id spans two fields in the doorbell register */
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write once a HW error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
254
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256{
257 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530258
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700259 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000260 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
261 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000262
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530263 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000264 return;
265
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700266 if (arm)
267 val |= 1 << DB_CQ_REARM_SHIFT;
268 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000269 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700270}
271
Suresh Reddy988d44b2016-09-07 19:57:52 +0530272static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
273{
274 int i;
275
276 /* Check if mac has already been added as part of uc-list */
277 for (i = 0; i < adapter->uc_macs; i++) {
278 if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
279 mac)) {
280 /* mac already added, skip addition */
281 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
282 return 0;
283 }
284 }
285
286 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
287 &adapter->pmac_id[0], 0);
288}
289
290static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
291{
292 int i;
293
294 /* Skip deletion if the programmed mac is
295 * being used in uc-list
296 */
297 for (i = 0; i < adapter->uc_macs; i++) {
298 if (adapter->pmac_id[i + 1] == pmac_id)
299 return;
300 }
301 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
302}
303
/* ndo_set_mac_address handler: validate and program a new device MAC.
 * If the interface is down, only the driver/netdev copies are updated;
 * otherwise the MAC is programmed into HW under rx_filter_lock and the
 * change is confirmed by querying the FW for the active MAC (the PMAC
 * cmds may "fail OK" on VFs lacking FILTMGMT privilege).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * if the FW did not activate the new MAC, or a cmd error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
368
Sathya Perlaca34fe32012-11-06 17:48:56 +0000369/* BE2 supports only v0 cmd */
370static void *hw_stats_from_cmd(struct be_adapter *adapter)
371{
372 if (BE2_chip(adapter)) {
373 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
374
375 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500376 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000377 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
378
379 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500380 } else {
381 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
382
383 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000384 }
385}
386
387/* BE2 supports only v0 cmd */
388static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
389{
390 if (BE2_chip(adapter)) {
391 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
392
393 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500394 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000395 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
396
397 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500398 } else {
399 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
400
401 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000402 }
403}
404
/* Copy the v0-layout HW stats (BE2 chips) into the driver's common
 * drv_stats structure, converting from LE to CPU byte order first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately;
	 * drv_stats keeps a single combined counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
453
/* Copy the v1-layout HW stats (BE3 chips) into the driver's common
 * drv_stats structure, converting from LE to CPU byte order first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
499
/* Copy the v2-layout HW stats (post-BE3 chips) into the driver's common
 * drv_stats structure, converting from LE to CPU byte order first.
 * v2 additionally carries RoCE counters, copied only when the adapter
 * supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
553
/* Copy Lancer per-port (pport) stats into the driver's common drv_stats
 * structure, converting from LE to CPU byte order first. Lancer uses a
 * different stats layout than the BEx/Skyhawk families; only the low
 * 32 bits (*_lo) of its 64-bit counters are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* report address- and vlan-filtered drops as one combined counter */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590
Sathya Perla09c1c682011-08-22 19:41:53 +0000591static void accumulate_16bit_val(u32 *acc, u16 val)
592{
593#define lo(x) (x & 0xFFFF)
594#define hi(x) (x & 0xFFFF0000)
595 bool wrapped = val < lo(*acc);
596 u32 newacc = hi(*acc) + val;
597
598 if (wrapped)
599 newacc += 65536;
600 ACCESS_ONCE(*acc) = newacc;
601}
602
Jingoo Han4188e7d2013-08-05 18:02:02 +0900603static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530604 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000605{
606 if (!BEx_chip(adapter))
607 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
608 else
609 /* below erx HW counter can actually wrap around after
610 * 65535. Driver accumulates a 32-bit value
611 */
612 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
613 (u16)erx_stat);
614}
615
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000616void be_parse_stats(struct be_adapter *adapter)
617{
Ajit Khaparde61000862013-10-03 16:16:33 -0500618 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000619 struct be_rx_obj *rxo;
620 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000621 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000622
Sathya Perlaca34fe32012-11-06 17:48:56 +0000623 if (lancer_chip(adapter)) {
624 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000625 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000626 if (BE2_chip(adapter))
627 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500628 else if (BE3_chip(adapter))
629 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000630 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500631 else
632 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000633
Ajit Khaparde61000862013-10-03 16:16:33 -0500634 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000635 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000636 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
637 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000638 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000639 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000640}
641
Sathya Perlaab1594e2011-07-25 19:10:15 +0000642static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530643 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700644{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000645 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000646 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700647 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000648 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000649 u64 pkts, bytes;
650 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700651 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652
Sathya Perla3abcded2010-10-03 22:12:27 -0700653 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000654 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530655
Sathya Perlaab1594e2011-07-25 19:10:15 +0000656 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700657 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000658 pkts = rx_stats(rxo)->rx_pkts;
659 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700660 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000661 stats->rx_packets += pkts;
662 stats->rx_bytes += bytes;
663 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
664 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
665 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700666 }
667
Sathya Perla3c8def92011-06-12 20:01:58 +0000668 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000669 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700672 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 pkts = tx_stats(txo)->tx_pkts;
674 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700675 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000676 stats->tx_packets += pkts;
677 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000678 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679
680 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000681 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000682 drvs->rx_alignment_symbol_errors +
683 drvs->rx_in_range_errors +
684 drvs->rx_out_range_errors +
685 drvs->rx_frame_too_long +
686 drvs->rx_dropped_too_small +
687 drvs->rx_dropped_too_short +
688 drvs->rx_dropped_header_too_small +
689 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000690 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000693 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000694 drvs->rx_out_range_errors +
695 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000696
Sathya Perlaab1594e2011-07-25 19:10:15 +0000697 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698
699 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000700 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000701
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702 /* receiver fifo overrun */
703 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000704 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000705 drvs->rx_input_fifo_overflow_drop +
706 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000707 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708}
709
/* Reflect the f/w-reported link state (@link_status != 0 means link up)
 * on the netdev carrier and log the transition.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* Force the carrier off until the first status update arrives */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
726
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500727static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700728{
Sathya Perla3c8def92011-06-12 20:01:58 +0000729 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530730 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000731
Sathya Perlaab1594e2011-07-25 19:10:15 +0000732 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000733 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500734 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530735 stats->tx_pkts += tx_pkts;
736 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
737 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000738 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739}
740
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500741/* Returns number of WRBs needed for the skb */
742static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700743{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500744 /* +1 for the header wrb */
745 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700746}
747
748static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
749{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500750 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
751 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
752 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
753 wrb->rsvd0 = 0;
754}
755
756/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
757 * to avoid the swap and shift/mask operations in wrb_fill().
758 */
759static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
760{
761 wrb->frag_pa_hi = 0;
762 wrb->frag_pa_lo = 0;
763 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000764 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765}
766
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000767static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530768 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000769{
770 u8 vlan_prio;
771 u16 vlan_tag;
772
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100773 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000774 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
775 /* If vlan priority provided by OS is NOT in available bmap */
776 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
777 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500778 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000779
780 return vlan_tag;
781}
782
Sathya Perlac9c47142014-03-27 10:46:19 +0530783/* Used only for IP tunnel packets */
784static u16 skb_inner_ip_proto(struct sk_buff *skb)
785{
786 return (inner_ip_hdr(skb)->version == 4) ?
787 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
788}
789
790static u16 skb_ip_proto(struct sk_buff *skb)
791{
792 return (ip_hdr(skb)->version == 4) ?
793 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
794}
795
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530796static inline bool be_is_txq_full(struct be_tx_obj *txo)
797{
798 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
799}
800
801static inline bool be_can_txq_wake(struct be_tx_obj *txo)
802{
803 return atomic_read(&txo->q.used) < txo->q.len / 2;
804}
805
806static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
807{
808 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
809}
810
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530811static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
812 struct sk_buff *skb,
813 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530815 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000817 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530818 BE_WRB_F_SET(wrb_params->features, LSO, 1);
819 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000820 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530821 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530823 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530824 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530825 proto = skb_inner_ip_proto(skb);
826 } else {
827 proto = skb_ip_proto(skb);
828 }
829 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530830 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530831 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530832 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 }
834
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100835 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530836 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
837 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838 }
839
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530840 BE_WRB_F_SET(wrb_params->features, CRC, 1);
841}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500842
/* Encode @wrb_params plus the skb's length/WRB-count into the TX header
 * WRB. The header is built in CPU byte-order; the caller converts it to
 * little-endian before handing it to the HW.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* LSO (segmentation-offload) settings */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit carries the OS2BMC (forward-to-BMC) request */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
879
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000880static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530881 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000882{
883 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500884 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000885
Sathya Perla7101e112010-03-22 20:41:12 +0000886
Sathya Perlaf986afc2015-02-06 08:18:43 -0500887 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
888 (u64)le32_to_cpu(wrb->frag_pa_lo);
889 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000890 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500891 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000892 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500893 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000894 }
895}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530897/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530898static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700899{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530900 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530902 queue_head_inc(&txo->q);
903 return head;
904}
905
906/* Set up the WRB header for xmit */
907static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
908 struct be_tx_obj *txo,
909 struct be_wrb_params *wrb_params,
910 struct sk_buff *skb, u16 head)
911{
912 u32 num_frags = skb_wrb_cnt(skb);
913 struct be_queue_info *txq = &txo->q;
914 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
915
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530916 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500917 be_dws_cpu_to_le(hdr, sizeof(*hdr));
918
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500919 BUG_ON(txo->sent_skb_list[head]);
920 txo->sent_skb_list[head] = skb;
921 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530922 atomic_add(num_frags, &txq->used);
923 txo->last_req_wrb_cnt = num_frags;
924 txo->pend_wrb_cnt += num_frags;
925}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700926
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530927/* Setup a WRB fragment (buffer descriptor) for xmit */
928static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
929 int len)
930{
931 struct be_eth_wrb *wrb;
932 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530934 wrb = queue_head_node(txq);
935 wrb_fill(wrb, busaddr, len);
936 queue_head_inc(txq);
937}
938
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to the failed pkt's header WRB so
	 * the loop below can walk its frag WRBs
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		/* only the first mapped frag (linear data) may have been
		 * mapped with dma_map_single(); the rest are pages
		 */
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the header WRB so these slots are
	 * reused by the next xmit
	 */
	txq->head = head;
}
966
967/* Enqueue the given packet for transmit. This routine allocates WRBs for the
968 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
969 * of WRBs used up by the packet.
970 */
971static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
972 struct sk_buff *skb,
973 struct be_wrb_params *wrb_params)
974{
975 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
976 struct device *dev = &adapter->pdev->dev;
977 struct be_queue_info *txq = &txo->q;
978 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530979 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530980 dma_addr_t busaddr;
981 int len;
982
983 head = be_tx_get_wrb_hdr(txo);
984
985 if (skb->len > skb->data_len) {
986 len = skb_headlen(skb);
987
988 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
989 if (dma_mapping_error(dev, busaddr))
990 goto dma_err;
991 map_single = true;
992 be_tx_setup_wrb_frag(txo, busaddr, len);
993 copied += len;
994 }
995
996 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
997 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
998 len = skb_frag_size(frag);
999
1000 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1001 if (dma_mapping_error(dev, busaddr))
1002 goto dma_err;
1003 be_tx_setup_wrb_frag(txo, busaddr, len);
1004 copied += len;
1005 }
1006
1007 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1008
1009 be_tx_stats_update(txo, skb);
1010 return wrb_cnt;
1011
1012dma_err:
1013 adapter->drv_stats.dma_map_errors++;
1014 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001015 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016}
1017
/* Non-zero once the QnQ async event has been received from the f/w */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001024 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301025 struct be_wrb_params
1026 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +00001027{
1028 u16 vlan_tag = 0;
1029
1030 skb = skb_share_check(skb, GFP_ATOMIC);
1031 if (unlikely(!skb))
1032 return skb;
1033
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001034 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +00001035 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301036
1037 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1038 if (!vlan_tag)
1039 vlan_tag = adapter->pvid;
1040 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1041 * skip VLAN insertion
1042 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301043 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301044 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001045
1046 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001047 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1048 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001049 if (unlikely(!skb))
1050 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051 skb->vlan_tci = 0;
1052 }
1053
1054 /* Insert the outer VLAN, if any */
1055 if (adapter->qnq_vid) {
1056 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001057 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1058 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001059 if (unlikely(!skb))
1060 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301061 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001062 }
1063
Somnath Kotur93040ae2012-06-26 22:32:10 +00001064 return skb;
1065}
1066
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001067static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1068{
1069 struct ethhdr *eh = (struct ethhdr *)skb->data;
1070 u16 offset = ETH_HLEN;
1071
1072 if (eh->h_proto == htons(ETH_P_IPV6)) {
1073 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1074
1075 offset += sizeof(struct ipv6hdr);
1076 if (ip6h->nexthdr != NEXTHDR_TCP &&
1077 ip6h->nexthdr != NEXTHDR_UDP) {
1078 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301079 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001080
1081 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1082 if (ehdr->hdrlen == 0xff)
1083 return true;
1084 }
1085 }
1086 return false;
1087}
1088
1089static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1090{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001091 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001092}
1093
Sathya Perla748b5392014-05-09 13:29:13 +05301094static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001095{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001096 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001097}
1098
/* Apply the Lancer/BEx TX HW-bug workarounds: trim padded small IPv4
 * pkts, skip HW vlan tagging for pre-tagged pkts in pvid mode, and
 * insert vlan tags in SW where HW tagging would corrupt csums or hang
 * the ASIC. Returns the (possibly replaced) skb, or NULL if the pkt
 * was dropped/lost.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes: keep only L2 hdr + IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* tx_drop frees the skb; err is for skbs already consumed by
	 * be_insert_vlan_in_pkt()
	 */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1167
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301168static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1169 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301170 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301171{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301172 int err;
1173
Suresh Reddy8227e992015-10-12 03:47:19 -04001174 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1175 * packets that are 32b or less may cause a transmit stall
1176 * on that port. The workaround is to pad such packets
1177 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301178 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001179 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001180 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301181 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301182 }
1183
1184 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301185 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301186 if (!skb)
1187 return NULL;
1188 }
1189
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301190 /* The stack can send us skbs with length greater than
1191 * what the HW can handle. Trim the extra bytes.
1192 */
1193 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1194 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1195 WARN_ON(err);
1196
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301197 return skb;
1198}
1199
/* Notify the HW of all pending WRBs of @txo. If the pending WRB count
 * is odd (on non-Lancer chips) a zeroed dummy WRB is appended first and
 * the last request's header is patched to account for it.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* bump num_wrb in the (already little-endian) header of
		 * the last request so it covers the dummy WRB too
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1223
/* OS2BMC related */

/* well-known UDP ports matched by the BMC filter checks below */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

/* pkt-class predicates used by be_send_pkt_to_bmc(): "allowed" means
 * the pkt class is NOT filtered and must be copied to the BMC
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) && \
	!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* filter-enable tests over adapter->bmc_filt_mask: a set bit means the
 * corresponding pkt class is filtered for the BMC path
 */
#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1277
/* Decide whether @skb must also be transmitted to the BMC, based on the
 * filter bits in adapter->bmc_filt_mask. Only multicast/broadcast pkts
 * are candidates. When the pkt is to be sent to the BMC, any vlan tag
 * is inlined into the pkt data (see the comment at 'done:'), which may
 * replace *skb. Returns true if the caller must also send the pkt to
 * the BMC.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* unicast pkts are never forwarded to the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 router/neighbour advertisements have dedicated filters */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS are matched on the UDP dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1347
Sathya Perlaee9c7992013-05-22 23:04:55 +00001348static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1349{
1350 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001351 u16 q_idx = skb_get_queue_mapping(skb);
1352 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301353 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301354 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001355 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001356
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301357 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001358 if (unlikely(!skb))
1359 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001360
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301361 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1362
1363 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001364 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001365 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001366 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001367 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001368
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301369 /* if os2bmc is enabled and if the pkt is destined to bmc,
1370 * enqueue the pkt a 2nd time with mgmt bit set.
1371 */
1372 if (be_send_pkt_to_bmc(adapter, &skb)) {
1373 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1374 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1375 if (unlikely(!wrb_cnt))
1376 goto drop;
1377 else
1378 skb_get(skb);
1379 }
1380
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301381 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001382 netif_stop_subqueue(netdev, q_idx);
1383 tx_stats(txo)->tx_stops++;
1384 }
1385
1386 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1387 be_xmit_flush(adapter, txo);
1388
1389 return NETDEV_TX_OK;
1390drop:
1391 tx_stats(txo)->tx_drv_drops++;
1392 /* Flush the already enqueued tx requests */
1393 if (flush && txo->pend_wrb_cnt)
1394 be_xmit_flush(adapter, txo);
1395
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 return NETDEV_TX_OK;
1397}
1398
1399static int be_change_mtu(struct net_device *netdev, int new_mtu)
1400{
1401 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301402 struct device *dev = &adapter->pdev->dev;
1403
1404 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1405 dev_info(dev, "MTU must be between %d and %d bytes\n",
1406 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 return -EINVAL;
1408 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301409
1410 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301411 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 netdev->mtu = new_mtu;
1413 return 0;
1414}
1415
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001416static inline bool be_in_all_promisc(struct be_adapter *adapter)
1417{
1418 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1419 BE_IF_FLAGS_ALL_PROMISCUOUS;
1420}
1421
1422static int be_set_vlan_promisc(struct be_adapter *adapter)
1423{
1424 struct device *dev = &adapter->pdev->dev;
1425 int status;
1426
1427 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1428 return 0;
1429
1430 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1431 if (!status) {
1432 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1433 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1434 } else {
1435 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1436 }
1437 return status;
1438}
1439
1440static int be_clear_vlan_promisc(struct be_adapter *adapter)
1441{
1442 struct device *dev = &adapter->pdev->dev;
1443 int status;
1444
1445 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1446 if (!status) {
1447 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1448 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1449 }
1450 return status;
1451}
1452
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001454 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1455 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 */
Sathya Perla10329df2012-06-05 19:37:18 +00001457static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
Vasundhara Volam50762662014-09-12 17:39:14 +05301459 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001460 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301461 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001462 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001463
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001464 /* No need to change the VLAN state if the I/F is in promiscuous */
1465 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001466 return 0;
1467
Sathya Perla92bf14a2013-08-27 16:57:32 +05301468 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001469 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001470
Somnath Kotur841f60f2016-07-27 05:26:15 -04001471 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1472 status = be_clear_vlan_promisc(adapter);
1473 if (status)
1474 return status;
1475 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001476 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301477 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1478 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001479
Vasundhara Volam435452a2015-03-20 06:28:23 -04001480 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001481 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001482 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001483 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001484 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1485 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301486 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001487 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001489 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
Patrick McHardy80d5c362013-04-19 02:04:28 +00001492static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
1494 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001495 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496
Sathya Perlab7172412016-07-27 05:26:18 -04001497 mutex_lock(&adapter->rx_filter_lock);
1498
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001499 /* Packets with VID 0 are always received by Lancer by default */
1500 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001501 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301502
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301503 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001504 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001505
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301506 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301507 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001508
Sathya Perlab7172412016-07-27 05:26:18 -04001509 status = be_vid_config(adapter);
1510done:
1511 mutex_unlock(&adapter->rx_filter_lock);
1512 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513}
1514
Patrick McHardy80d5c362013-04-19 02:04:28 +00001515static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516{
1517 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001518 int status = 0;
1519
1520 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001522 /* Packets with VID 0 are always received by Lancer by default */
1523 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001524 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001525
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301526 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001527 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301528
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301529 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301530 adapter->vlans_added--;
1531
Sathya Perlab7172412016-07-27 05:26:18 -04001532 status = be_vid_config(adapter);
1533done:
1534 mutex_unlock(&adapter->rx_filter_lock);
1535 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536}
1537
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001538static void be_set_all_promisc(struct be_adapter *adapter)
1539{
1540 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1541 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1542}
1543
1544static void be_set_mc_promisc(struct be_adapter *adapter)
1545{
1546 int status;
1547
1548 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1549 return;
1550
1551 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1552 if (!status)
1553 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1554}
1555
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001556static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001557{
1558 int status;
1559
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001560 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1561 return;
1562
1563 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001564 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001565 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1566}
1567
1568static void be_clear_uc_promisc(struct be_adapter *adapter)
1569{
1570 int status;
1571
1572 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1573 return;
1574
1575 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1576 if (!status)
1577 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1578}
1579
1580/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We really don't
1582 * add/remove addresses through this callback. But, we use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
1585static int be_uc_list_update(struct net_device *netdev,
1586 const unsigned char *addr)
1587{
1588 struct be_adapter *adapter = netdev_priv(netdev);
1589
1590 adapter->update_uc_list = true;
1591 return 0;
1592}
1593
1594static int be_mc_list_update(struct net_device *netdev,
1595 const unsigned char *addr)
1596{
1597 struct be_adapter *adapter = netdev_priv(netdev);
1598
1599 adapter->update_mc_list = true;
1600 return 0;
1601}
1602
/* Synchronize the HW multicast filter with the netdev mc-list: either fall
 * back to mc-promisc (IFF_ALLMULTI or too many addresses) or cache the list
 * under the netdev addr lock and program it into the FW afterwards (FW
 * commands can sleep, so they run outside the BH lock).
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	/* only detects changes; real programming happens below */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		/* all-promisc covers multicast; nothing to program */
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			/* programming the list failed; stay reachable via
			 * mc-promisc
			 */
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1653
1654static void be_clear_mc_list(struct be_adapter *adapter)
1655{
1656 struct net_device *netdev = adapter->netdev;
1657
1658 __dev_mc_unsync(netdev, NULL);
1659 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001660 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001661}
1662
Suresh Reddy988d44b2016-09-07 19:57:52 +05301663static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1664{
1665 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
Suresh Reddyc27ebf52016-09-07 19:57:53 +05301666 adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301667 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1668 return 0;
1669 }
1670
1671 return be_cmd_pmac_add(adapter,
1672 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1673 adapter->if_handle,
1674 &adapter->pmac_id[uc_idx + 1], 0);
1675}
1676
1677static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1678{
1679 if (pmac_id == adapter->pmac_id[0])
1680 return;
1681
1682 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1683}
1684
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001685static void be_set_uc_list(struct be_adapter *adapter)
1686{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001687 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001688 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001689 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001690 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001691
Sathya Perlab7172412016-07-27 05:26:18 -04001692 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001693 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001694
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001695 if (netdev->flags & IFF_PROMISC) {
1696 adapter->update_uc_list = false;
1697 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1698 uc_promisc = true;
1699 adapter->update_uc_list = false;
1700 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1701 /* Update uc-list unconditionally if the iface was previously
1702 * in uc-promisc mode and now is out of that mode.
1703 */
1704 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001705 }
1706
Sathya Perlab7172412016-07-27 05:26:18 -04001707 if (adapter->update_uc_list) {
1708 i = 1; /* First slot is claimed by the Primary MAC */
1709
1710 /* cache the uc-list in adapter array */
1711 netdev_for_each_uc_addr(ha, netdev) {
1712 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1713 i++;
1714 }
1715 curr_uc_macs = netdev_uc_count(netdev);
1716 }
1717 netif_addr_unlock_bh(netdev);
1718
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001719 if (uc_promisc) {
1720 be_set_uc_promisc(adapter);
1721 } else if (adapter->update_uc_list) {
1722 be_clear_uc_promisc(adapter);
1723
Sathya Perlab7172412016-07-27 05:26:18 -04001724 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301725 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001726
Sathya Perlab7172412016-07-27 05:26:18 -04001727 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301728 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001729 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001730 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001731 }
1732}
1733
1734static void be_clear_uc_list(struct be_adapter *adapter)
1735{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001737 int i;
1738
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001739 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001740 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301741 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1742
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001743 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301744}
1745
Sathya Perlab7172412016-07-27 05:26:18 -04001746static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747{
Sathya Perlab7172412016-07-27 05:26:18 -04001748 struct net_device *netdev = adapter->netdev;
1749
1750 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751
1752 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001753 if (!be_in_all_promisc(adapter))
1754 be_set_all_promisc(adapter);
1755 } else if (be_in_all_promisc(adapter)) {
1756 /* We need to re-program the vlan-list or clear
1757 * vlan-promisc mode (if needed) when the interface
1758 * comes out of promisc mode.
1759 */
1760 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001761 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001762
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001763 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001764 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001765
1766 mutex_unlock(&adapter->rx_filter_lock);
1767}
1768
1769static void be_work_set_rx_mode(struct work_struct *work)
1770{
1771 struct be_cmd_work *cmd_work =
1772 container_of(work, struct be_cmd_work, work);
1773
1774 __be_set_rx_mode(cmd_work->adapter);
1775 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776}
1777
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001778static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1779{
1780 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001781 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001782 int status;
1783
Sathya Perla11ac75e2011-12-13 00:58:50 +00001784 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001785 return -EPERM;
1786
Sathya Perla11ac75e2011-12-13 00:58:50 +00001787 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001788 return -EINVAL;
1789
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301790 /* Proceed further only if user provided MAC is different
1791 * from active MAC
1792 */
1793 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1794 return 0;
1795
Sathya Perla3175d8c2013-07-23 15:25:03 +05301796 if (BEx_chip(adapter)) {
1797 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1798 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001799
Sathya Perla11ac75e2011-12-13 00:58:50 +00001800 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1801 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301802 } else {
1803 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1804 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001805 }
1806
Kalesh APabccf232014-07-17 16:20:24 +05301807 if (status) {
1808 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1809 mac, vf, status);
1810 return be_cmd_status(status);
1811 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001812
Kalesh APabccf232014-07-17 16:20:24 +05301813 ether_addr_copy(vf_cfg->mac_addr, mac);
1814
1815 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001816}
1817
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001818static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301819 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001820{
1821 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001822 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001823
Sathya Perla11ac75e2011-12-13 00:58:50 +00001824 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001825 return -EPERM;
1826
Sathya Perla11ac75e2011-12-13 00:58:50 +00001827 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001828 return -EINVAL;
1829
1830 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001831 vi->max_tx_rate = vf_cfg->tx_rate;
1832 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001833 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1834 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001835 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301836 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001837 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001838
1839 return 0;
1840}
1841
Vasundhara Volam435452a2015-03-20 06:28:23 -04001842static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1843{
1844 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1845 u16 vids[BE_NUM_VLANS_SUPPORTED];
1846 int vf_if_id = vf_cfg->if_handle;
1847 int status;
1848
1849 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001850 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001851 if (status)
1852 return status;
1853
1854 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1855 vids[0] = 0;
1856 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1857 if (!status)
1858 dev_info(&adapter->pdev->dev,
1859 "Cleared guest VLANs on VF%d", vf);
1860
1861 /* After TVT is enabled, disallow VFs to program VLAN filters */
1862 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1863 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1864 ~BE_PRIV_FILTMGMT, vf + 1);
1865 if (!status)
1866 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1867 }
1868 return 0;
1869}
1870
/* Disable Transparent VLAN Tagging for a VF and give back its FILTMGMT
 * privilege so the guest can manage its own VLAN filters again.  The guest
 * must bounce its interface for the change to fully take effect.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1897
Moshe Shemesh79aab092016-09-22 12:11:15 +03001898static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1899 __be16 vlan_proto)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001900{
1901 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001902 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001903 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001904
Sathya Perla11ac75e2011-12-13 00:58:50 +00001905 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001906 return -EPERM;
1907
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001908 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001909 return -EINVAL;
1910
Moshe Shemesh79aab092016-09-22 12:11:15 +03001911 if (vlan_proto != htons(ETH_P_8021Q))
1912 return -EPROTONOSUPPORT;
1913
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001914 if (vlan || qos) {
1915 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001916 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001917 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001918 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001919 }
1920
Kalesh APabccf232014-07-17 16:20:24 +05301921 if (status) {
1922 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001923 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1924 status);
Kalesh APabccf232014-07-17 16:20:24 +05301925 return be_cmd_status(status);
1926 }
1927
1928 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301929 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001930}
1931
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001932static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1933 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001934{
1935 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301936 struct device *dev = &adapter->pdev->dev;
1937 int percent_rate, status = 0;
1938 u16 link_speed = 0;
1939 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001940
Sathya Perla11ac75e2011-12-13 00:58:50 +00001941 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001942 return -EPERM;
1943
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001944 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001945 return -EINVAL;
1946
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001947 if (min_tx_rate)
1948 return -EINVAL;
1949
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301950 if (!max_tx_rate)
1951 goto config_qos;
1952
1953 status = be_cmd_link_status_query(adapter, &link_speed,
1954 &link_status, 0);
1955 if (status)
1956 goto err;
1957
1958 if (!link_status) {
1959 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301960 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301961 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001962 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001963
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301964 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1965 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1966 link_speed);
1967 status = -EINVAL;
1968 goto err;
1969 }
1970
1971 /* On Skyhawk the QOS setting must be done only as a % value */
1972 percent_rate = link_speed / 100;
1973 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1974 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1975 percent_rate);
1976 status = -EINVAL;
1977 goto err;
1978 }
1979
1980config_qos:
1981 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001982 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301983 goto err;
1984
1985 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1986 return 0;
1987
1988err:
1989 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1990 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05301991 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001992}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301993
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301994static int be_set_vf_link_state(struct net_device *netdev, int vf,
1995 int link_state)
1996{
1997 struct be_adapter *adapter = netdev_priv(netdev);
1998 int status;
1999
2000 if (!sriov_enabled(adapter))
2001 return -EPERM;
2002
2003 if (vf >= adapter->num_vfs)
2004 return -EINVAL;
2005
2006 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302007 if (status) {
2008 dev_err(&adapter->pdev->dev,
2009 "Link state change on VF %d failed: %#x\n", vf, status);
2010 return be_cmd_status(status);
2011 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302012
Kalesh APabccf232014-07-17 16:20:24 +05302013 adapter->vf_cfg[vf].plink_tracking = link_state;
2014
2015 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302016}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002017
Kalesh APe7bcbd72015-05-06 05:30:32 -04002018static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2019{
2020 struct be_adapter *adapter = netdev_priv(netdev);
2021 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2022 u8 spoofchk;
2023 int status;
2024
2025 if (!sriov_enabled(adapter))
2026 return -EPERM;
2027
2028 if (vf >= adapter->num_vfs)
2029 return -EINVAL;
2030
2031 if (BEx_chip(adapter))
2032 return -EOPNOTSUPP;
2033
2034 if (enable == vf_cfg->spoofchk)
2035 return 0;
2036
2037 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2038
2039 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2040 0, spoofchk);
2041 if (status) {
2042 dev_err(&adapter->pdev->dev,
2043 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2044 return be_cmd_status(status);
2045 }
2046
2047 vf_cfg->spoofchk = enable;
2048 return 0;
2049}
2050
Sathya Perla2632baf2013-10-01 16:00:00 +05302051static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2052 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002053{
Sathya Perla2632baf2013-10-01 16:00:00 +05302054 aic->rx_pkts_prev = rx_pkts;
2055 aic->tx_reqs_prev = tx_pkts;
2056 aic->jiffies = now;
2057}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002058
/* Compute a new interrupt-delay value (eqd) for @eqo from the rx/tx packet
 * rate observed on its queues since the previous AIC sample.  When adaptive
 * coalescing is disabled, returns the static ethtool-configured delay.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive mode off: clear the sample timestamp and use
		 * the fixed (et_eqd) delay.
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counts over all rx/tx queues attached to this EQ.
	 * The u64_stats loops retry if a writer updates stats meanwhile.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Too soon to recompute a rate; keep the previous value */
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets/sec across rx+tx since the last sample, scaled into a
	 * delay value and clamped to the configured [min_eqd, max_eqd].
	 */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2119
2120/* For Skyhawk-R only */
2121static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2122{
2123 struct be_adapter *adapter = eqo->adapter;
2124 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2125 ulong now = jiffies;
2126 int eqd;
2127 u32 mult_enc;
2128
2129 if (!aic->enable)
2130 return 0;
2131
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302132 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002133 eqd = aic->prev_eqd;
2134 else
2135 eqd = be_get_new_eqd(eqo);
2136
2137 if (eqd > 100)
2138 mult_enc = R2I_DLY_ENC_1;
2139 else if (eqd > 60)
2140 mult_enc = R2I_DLY_ENC_2;
2141 else if (eqd > 20)
2142 mult_enc = R2I_DLY_ENC_3;
2143 else
2144 mult_enc = R2I_DLY_ENC_0;
2145
2146 aic->prev_eqd = eqd;
2147
2148 return mult_enc;
2149}
2150
2151void be_eqd_update(struct be_adapter *adapter, bool force_update)
2152{
2153 struct be_set_eqd set_eqd[MAX_EVT_QS];
2154 struct be_aic_obj *aic;
2155 struct be_eq_obj *eqo;
2156 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002157
Sathya Perla2632baf2013-10-01 16:00:00 +05302158 for_all_evt_queues(adapter, eqo, i) {
2159 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002160 eqd = be_get_new_eqd(eqo);
2161 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302162 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2163 set_eqd[num].eq_id = eqo->q.id;
2164 aic->prev_eqd = eqd;
2165 num++;
2166 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002167 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302168
2169 if (num)
2170 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002171}
2172
Sathya Perla3abcded2010-10-03 22:12:27 -07002173static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302174 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002175{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002176 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002177
Sathya Perlaab1594e2011-07-25 19:10:15 +00002178 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002179 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002180 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002181 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302182 if (rxcp->tunneled)
2183 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002184 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002185 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002186 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002187 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002188 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189}
2190
Sathya Perla2e588f82011-03-11 02:49:26 +00002191static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002192{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002193 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302194 * Also ignore ipcksm for ipv6 pkts
2195 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002196 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302197 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002198}
2199
/* Consume one rx frag from the tail of @rxo's queue and return its
 * page-info entry.  The frag that carries the page's DMA mapping
 * (last_frag) is fully unmapped; every other frag is only synced for
 * CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* This entry owns the mapping of the whole big page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Only this frag's slice needs to be CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Advance the ring tail and release the slot */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2225
2226/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002227static void be_rx_compl_discard(struct be_rx_obj *rxo,
2228 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002231 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002233 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302234 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002235 put_page(page_info->page);
2236 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 }
2238}
2239
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area; larger frames keep only the ethernet header linear and attach
 * the rx page fragments to the skb's frag list.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ether header into the linear area and
		 * attach the rest of this frag as frags[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as slot j; drop this frag's extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2314
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: drop the frame, but still consume and
		 * free the posted rx page frags of this completion.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when rx-csum offload is enabled and
	 * the completion flags indicate a verified TCP/UDP checksum.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* Propagate the HW 'tunneled' indication as the skb csum level */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2350
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: free the posted rx frags instead */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach each received fragment to the skb as a page frag */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j; drop this frag's extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* Propagate the HW 'tunneled' indication as the skb csum level */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2407
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002408static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2409 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002410{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302411 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2412 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2413 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2414 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2415 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2416 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2417 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2418 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2419 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2420 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2421 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002422 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302423 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2424 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002425 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302426 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302427 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302428 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002429}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002430
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002431static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2432 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002433{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302434 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2435 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2436 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2437 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2438 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2439 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2440 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2441 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2442 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2443 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2444 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002445 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302446 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2447 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002448 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302449 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2450 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002451}
2452
/* Pop the next valid Rx completion from @rxo's CQ, parse it into the
 * per-rxo rxcp cache, and return it.  Returns NULL when no valid
 * completion is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Ignore the L4 checksum result for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* vlan_tag needs byte-swapping on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it matches the port's pvid and is not a
		 * vlan configured on the host
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2497
Eric Dumazet1829b082011-03-01 05:48:12 +00002498static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002500 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002501
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002502 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002503 gfp |= __GFP_COMP;
2504 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505}
2506
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post up to frags_needed buffers; stop early if the next ring
	 * slot still has a page attached (ring full).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current page;
			 * each frag takes its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the HW rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Page exhausted: this frag owns the page mapping */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most MAX_NUM_POST_ERX_DB bufs */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2589
/* Pop the next valid Tx completion from @txo's CQ into txo->txcp and
 * return it.  Returns NULL when no valid completion is pending.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not re-consumed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2610
/* Walk the TX ring from its current tail up to and including @last_index,
 * unmapping each WRB's DMA buffer and freeing the skbs of completed
 * requests.  Returns the number of WRBs (including header WRBs) consumed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		/* A non-NULL sent_skbs[] entry marks the header WRB of a
		 * new request.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data WRB of a request also unmaps the skb
		 * header when the skb has linear data.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2645
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* evt == 0 means the entry has not been posted by HW yet */
		if (eqe->evt == 0)
			break;

		/* Order the load of eqe->evt above before the entry is
		 * consumed and cleared below.
		 */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2665
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002666/* Leaves the EQ is disarmed state */
2667static void be_eq_clean(struct be_eq_obj *eqo)
2668{
2669 int num = events_get(eqo);
2670
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002671 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002672}
2673
Kalesh AP99b44302015-08-05 03:27:49 -04002674/* Free posted rx buffers that were not used */
2675static void be_rxq_clean(struct be_rx_obj *rxo)
2676{
2677 struct be_queue_info *rxq = &rxo->q;
2678 struct be_rx_page_info *page_info;
2679
2680 while (atomic_read(&rxq->used) > 0) {
2681 page_info = get_rx_page_info(rxo);
2682 put_page(page_info->page);
2683 memset(page_info, 0, sizeof(*page_info));
2684 }
2685 BUG_ON(atomic_read(&rxq->used));
2686 rxq->tail = 0;
2687 rxq->head = 0;
2688}
2689
/* Drain all RX completions on @rxo's CQ and wait for the HW flush
 * completion (identified by num_rcvd == 0), except on Lancer which posts
 * none. Leaves the CQ in unarmed state.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or if the HW is in error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2729
/* Reap TX completions on all TX queues until HW has been silent for ~10ms
 * (or a HW error is detected), then free wrbs that were enqueued but never
 * notified to HW and rewind the TXQ indices to the last notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Saw progress: restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			/* end_idx := index of the last used wrb */
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2796
/* Tear down all event queues: drain pending events, destroy the HW EQ,
 * unregister NAPI and release per-EQ resources, then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if the HW EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2813
/* Create the event queues, each with a NAPI context and a CPU affinity
 * hint spread over the device's NUMA node.
 * Returns 0 on success or a negative error code; on failure, queues
 * created so far are presumably released by the caller — verify against
 * the teardown path (be_evt_queues_destroy).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Enable adaptive interrupt coalescing with the max delay cap */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2854
Sathya Perla5fb379e2009-06-18 00:02:59 +00002855static void be_mcc_queues_destroy(struct be_adapter *adapter)
2856{
2857 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002858
Sathya Perla8788fdc2009-07-27 22:52:03 +00002859 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002860 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002861 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002862 be_queue_free(adapter, q);
2863
Sathya Perla8788fdc2009-07-27 22:52:03 +00002864 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002865 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002866 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002867 be_queue_free(adapter, q);
2868}
2869
2870/* Must be called only after TX qs are created as MCC shares TX EQ */
2871static int be_mcc_queues_create(struct be_adapter *adapter)
2872{
2873 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874
Sathya Perla8788fdc2009-07-27 22:52:03 +00002875 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002876 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302877 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002878 goto err;
2879
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002880 /* Use the default EQ for MCC completions */
2881 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002882 goto mcc_cq_free;
2883
Sathya Perla8788fdc2009-07-27 22:52:03 +00002884 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002885 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2886 goto mcc_cq_destroy;
2887
Sathya Perla8788fdc2009-07-27 22:52:03 +00002888 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002889 goto mcc_q_free;
2890
2891 return 0;
2892
2893mcc_q_free:
2894 be_queue_free(adapter, q);
2895mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002896 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002897mcc_cq_free:
2898 be_queue_free(adapter, cq);
2899err:
2900 return -1;
2901}
2902
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002903static void be_tx_queues_destroy(struct be_adapter *adapter)
2904{
2905 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002906 struct be_tx_obj *txo;
2907 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002908
Sathya Perla3c8def92011-06-12 20:01:58 +00002909 for_all_tx_queues(adapter, txo, i) {
2910 q = &txo->q;
2911 if (q->created)
2912 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2913 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002914
Sathya Perla3c8def92011-06-12 20:01:58 +00002915 q = &txo->cq;
2916 if (q->created)
2917 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2918 be_queue_free(adapter, q);
2919 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002920}
2921
/* Create the TX completion queues and TX queues. TXQs share an EQ when
 * there are fewer EQs than TXQs; each TXQ's XPS mapping follows its EQ's
 * CPU affinity mask. Returns 0 on success or a negative error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this TXQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2966
2967static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002968{
2969 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002970 struct be_rx_obj *rxo;
2971 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002972
Sathya Perla3abcded2010-10-03 22:12:27 -07002973 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002974 q = &rxo->cq;
2975 if (q->created)
2976 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2977 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002978 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002979}
2980
/* Decide the number of RX queues (RSS rings plus an optional default RXQ)
 * and create a completion queue for each, distributing CQs across the
 * available EQs. Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3022
/* INTx interrupt handler: count pending events and hand off to NAPI.
 * Reports IRQ_NONE for repeated spurious interrupts so the kernel can
 * detect a misbehaving IRQ line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3054
/* MSI-X interrupt handler: acknowledge the EQ without re-arming it
 * (be_poll() re-arms when polling completes) and schedule NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
3063
Sathya Perla2e588f82011-03-11 02:49:26 +00003064static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003065{
Somnath Koture38b1702013-05-29 22:55:56 +00003066 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067}
3068
/* Process up to @budget RX completions on @rxo, delivering packets via GRO
 * or the regular path, and replenish RX buffers when the queue runs low.
 * @polling distinguishes NAPI polling from busy-polling (GRO is skipped for
 * the latter). Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3128
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303129static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303130{
3131 switch (status) {
3132 case BE_TX_COMP_HDR_PARSE_ERR:
3133 tx_stats(txo)->tx_hdr_parse_err++;
3134 break;
3135 case BE_TX_COMP_NDMA_ERR:
3136 tx_stats(txo)->tx_dma_err++;
3137 break;
3138 case BE_TX_COMP_ACL_ERR:
3139 tx_stats(txo)->tx_spoof_check_err++;
3140 break;
3141 }
3142}
3143
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303144static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303145{
3146 switch (status) {
3147 case LANCER_TX_COMP_LSO_ERR:
3148 tx_stats(txo)->tx_tso_err++;
3149 break;
3150 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3151 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3152 tx_stats(txo)->tx_spoof_check_err++;
3153 break;
3154 case LANCER_TX_COMP_QINQ_ERR:
3155 tx_stats(txo)->tx_qinq_err++;
3156 break;
3157 case LANCER_TX_COMP_PARITY_ERR:
3158 tx_stats(txo)->tx_internal_parity_err++;
3159 break;
3160 case LANCER_TX_COMP_DMA_ERR:
3161 tx_stats(txo)->tx_dma_err++;
3162 break;
3163 }
3164}
3165
/* Reap all available TX completions on @txo (netdev tx-queue index @idx):
 * free completed wrbs, record per-status error stats, and wake the netdev
 * subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error-stat layout differs between Lancer and BEx */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003200
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Try to take ownership of the EQ for NAPI processing.
 * Returns false if a busy-poller currently owns the EQ; the NAPI-yield
 * flag is then recorded in eqo->state.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI's ownership of the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take ownership of the EQ for busy-polling.
 * Returns false if the EQ is already locked (e.g. owned by NAPI).
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the busy-poller's ownership of the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock and ownership state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-polling on this EQ has quiesced */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Stubs when busy-polling is compiled out: NAPI always owns the EQ */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3300
/* NAPI poll handler for an EQ: reaps TX completions on all TXQs mapped to
 * the EQ, processes RX completions (unless a busy-poller owns the EQ),
 * services MCC completions on the MCC EQ, and re-arms the EQ when done.
 * Returns the RX work done (or the full budget if polling must continue).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poller owns the EQ: report full budget so NAPI
		 * reschedules us instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3349
Sathya Perla6384a4d2013-10-25 10:40:16 +05303350#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) receive path for one EQ.
 * Tries to take the EQ's busy-poll lock; if NAPI currently owns the queues,
 * returns LL_FLUSH_BUSY so the caller backs off. Otherwise processes a small
 * batch (up to 4 frames) from the first RXQ on this EQ that has work.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* stop at the first RXQ that yielded any frames */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
3370#endif
3371
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003372void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003373{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003374 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3375 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003376 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303377 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003378
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303379 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003380 return;
3381
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003382 if (lancer_chip(adapter)) {
3383 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3384 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303385 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003386 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303387 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003388 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303389 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303390 /* Do not log error messages if its a FW reset */
3391 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3392 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3393 dev_info(dev, "Firmware update in progress\n");
3394 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303395 dev_err(dev, "Error detected in the card\n");
3396 dev_err(dev, "ERR: sliport status 0x%x\n",
3397 sliport_status);
3398 dev_err(dev, "ERR: sliport error1 0x%x\n",
3399 sliport_err1);
3400 dev_err(dev, "ERR: sliport error2 0x%x\n",
3401 sliport_err2);
3402 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003403 }
3404 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003405 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3406 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3407 ue_lo_mask = ioread32(adapter->pcicfg +
3408 PCICFG_UE_STATUS_LOW_MASK);
3409 ue_hi_mask = ioread32(adapter->pcicfg +
3410 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003411
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003412 ue_lo = (ue_lo & ~ue_lo_mask);
3413 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003414
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303415 /* On certain platforms BE hardware can indicate spurious UEs.
3416 * Allow HW to stop working completely in case of a real UE.
3417 * Hence not setting the hw_error for UE detection.
3418 */
3419
3420 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303421 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303422 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303423 be_set_error(adapter, BE_ERROR_UE);
3424
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303425 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3426 if (ue_lo & 1)
3427 dev_err(dev, "UE: %s bit set\n",
3428 ue_status_low_desc[i]);
3429 }
3430 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3431 if (ue_hi & 1)
3432 dev_err(dev, "UE: %s bit set\n",
3433 ue_status_hi_desc[i]);
3434 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303435 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003436 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003437}
3438
Sathya Perla8d56ff12009-11-22 22:02:26 +00003439static void be_msix_disable(struct be_adapter *adapter)
3440{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003441 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003442 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003443 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303444 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003445 }
3446}
3447
/* Enable MSI-X for the adapter.
 * Computes the number of vectors to request (splitting between NIC and RoCE
 * when RoCE is supported), enables them via pci_enable_msix_range(), and
 * records the NIC/RoCE split in the adapter.
 * Returns 0 on success. On failure returns the pci_enable_msix_range()
 * error for VFs (which cannot fall back to INTx) and 0 for PFs (which can).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but at least
	 * MIN_MSIX_VECTORS; returns a negative errno on failure.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split the granted vectors evenly between RoCE and NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3496
/* Return the Linux IRQ number assigned to this EQ's MSI-X table slot */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3502
3503static int be_msix_register(struct be_adapter *adapter)
3504{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003505 struct net_device *netdev = adapter->netdev;
3506 struct be_eq_obj *eqo;
3507 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003508
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003509 for_all_evt_queues(adapter, eqo, i) {
3510 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3511 vec = be_msix_vec_get(adapter, eqo);
3512 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003513 if (status)
3514 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003515
3516 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003517 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003518
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003519 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003520err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303521 for (i--; i >= 0; i--) {
3522 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003523 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303524 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003525 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303526 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003527 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003528 return status;
3529}
3530
/* Register the adapter's interrupt handler(s).
 * Prefers MSI-X; on MSI-X registration failure a PF falls back to a shared
 * INTx line (VFs cannot, so the error is propagated). Sets isr_registered
 * on success so be_irq_unregister() knows there is something to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3558
3559static void be_irq_unregister(struct be_adapter *adapter)
3560{
3561 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003562 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003563 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003564
3565 if (!adapter->isr_registered)
3566 return;
3567
3568 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003569 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003570 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003571 goto done;
3572 }
3573
3574 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003575 for_all_evt_queues(adapter, eqo, i) {
3576 vec = be_msix_vec_get(adapter, eqo);
3577 irq_set_affinity_hint(vec, NULL);
3578 free_irq(vec, eqo);
3579 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003580
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003581done:
3582 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003583}
3584
/* Destroy all RX queues and their backing memory, then turn off RSS in FW.
 * Called on the teardown path; the Lancer-specific replenish below must run
 * before the RXQ-destroy command, so the statement order is significant.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			/* Destroy in FW, then drain completions and buffers */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	/* If RSS was on, disable it in FW (flags = RSS_ENABLE_NONE) */
	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3621
/* Remove the primary MAC and the unicast/multicast address lists from the
 * interface, and (Lancer only) clear the IFACE RX-filter flags.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_dev_mac_del(adapter, adapter->pmac_id[0]);
	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 * - VF's IFACE flags get cleared in the detach path
	 * - IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3647
/* ndo_stop handler: quiesce the device in a strict order — filters off,
 * NAPI disabled, MCC async events off, TX drained, RXQs destroyed, IRQs
 * synchronized and unregistered. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Ensure no in-flight interrupt handler still references the EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3697
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table and hash key when multiple RXQs exist, and post initial RX buffers.
 * Returns 0 on success or a negative/command error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default (non-RSS) RXQ when required or when no RSS
	 * queues are configured at all
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN slots
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}


	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3768
/* Enable the basic RX-filter flags on the interface, program the device MAC
 * (except for BE3 VFs, where the PF does it), restore VLANs and apply the
 * current RX mode. Returns 0 on success or a command error code.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* For BE3 VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;
		/* cache the MAC currently programmed in HW */
		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
3792
/* ndo_open handler: bring the interface up in order — create RXQs, enable
 * filters, register IRQs, arm CQs/EQs, enable NAPI, query link state and
 * start the TX queues. On any failure the partial setup is torn down via
 * be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* re-learn VxLAN offload ports (Skyhawk supports VxLAN offloads) */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3842
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003843static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3844{
3845 u32 addr;
3846
3847 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3848
3849 mac[5] = (u8)(addr & 0xFF);
3850 mac[4] = (u8)((addr >> 8) & 0xFF);
3851 mac[3] = (u8)((addr >> 16) & 0xFF);
3852 /* Use the OUI from the current MAC address */
3853 memcpy(mac, adapter->netdev->dev_addr, 3);
3854}
3855
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips program a PMAC entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next MAC in sequence */
		mac[5] += 1;
	}
	return status;
}
3891
/* Query the FW for each VF's currently-active MAC address and cache it in
 * the per-VF config. Returns 0 on success or the first command error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3908
/* Tear down SR-IOV state: disable SR-IOV (unless VFs are assigned to VMs),
 * remove each VF's MAC and interface in FW, restore BE3 HSW pass-through
 * forwarding, and free the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* Cannot disable SR-IOV while VFs are in use by guest VMs */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips used a PMAC entry; newer chips used SET_MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3942
/* Destroy all NIC queues. The order matters: MCC first, then RX CQs,
 * TX queues, and finally the event queues they all feed into.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3950
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303951static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003952{
Sathya Perla191eb752012-02-23 18:50:13 +00003953 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3954 cancel_delayed_work_sync(&adapter->work);
3955 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3956 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303957}
3958
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003959static void be_cancel_err_detection(struct be_adapter *adapter)
3960{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303961 struct be_error_recovery *err_rec = &adapter->error_recovery;
3962
3963 if (!be_err_recovery_workq)
3964 return;
3965
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003966 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303967 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003968 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3969 }
3970}
3971
/* Turn off VxLAN offloads: revert the interface from tunnel mode in FW,
 * clear the programmed VxLAN port, and strip the tunnel-offload feature
 * bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
3990
/* Compute the per-VF resource template (@vft_res) used when provisioning
 * @num_vfs VFs. Queue, CQ, MAC, VLAN, iface and MCCQ counts from the PF
 * pool (adapter->pool_res) are divided evenly among the PF and its VFs;
 * IFACE capability flags are only adjusted for fields that FW reports as
 * modifiable (via GET_PROFILE_CONFIG with RESOURCE_MODIFIABLE).
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than it's PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
4066
/* Destroy the primary iface in FW and free the filtering tables
 * (pmac_id, multicast and unicast lists) allocated by be_if_create().
 * Pointers are NULLed after kfree so a repeated call is harmless.
 */
static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}
4080
/* Undo be_setup(): cancel the periodic worker, clear VFs, optionally
 * re-distribute SRIOV pool resources in FW, disable VxLAN offloads and
 * tear down the iface, queues and MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	/* Drain any work still pending on the driver's global workqueue */
	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4116
/* Create an iface on behalf of each VF (proxy if_create with domain
 * vf + 1). On non-BE3 chips the per-VF capability flags are refreshed
 * from the FW profile, with VLAN promiscuous capability masked out.
 * Returns 0 on success or the first failing command's status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4152
Sathya Perla39f1d942012-05-08 19:41:24 +00004153static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004154{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004155 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004156 int vf;
4157
Sathya Perla39f1d942012-05-08 19:41:24 +00004158 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4159 GFP_KERNEL);
4160 if (!adapter->vf_cfg)
4161 return -ENOMEM;
4162
Sathya Perla11ac75e2011-12-13 00:58:50 +00004163 for_all_vfs(adapter, vf_cfg, vf) {
4164 vf_cfg->if_handle = -1;
4165 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004166 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004167 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004168}
4169
/* Provision SRIOV VFs. If VFs survived a previous driver load (old_vfs),
 * their iface handles and MACs are queried back from FW; otherwise new
 * ifaces and MAC addresses are created. Each VF is then granted filter
 * management privilege (if not already held), given default QoS/link
 * state, and its spoof-check state is cached. Finally SRIOV is enabled
 * on the PCI device and, on BE3, VEB forwarding is turned on.
 * On any failure, partially created VF state is cleaned up.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW; re-learn their handles/MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4262
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304263/* Converting function_mode bits on BE3 to SH mc_type enums */
4264
4265static u8 be_convert_mc_type(u32 function_mode)
4266{
Suresh Reddy66064db2014-06-23 16:41:29 +05304267 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304268 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304269 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304270 return FLEX10;
4271 else if (function_mode & VNIC_MODE)
4272 return vNIC2;
4273 else if (function_mode & UMC_ENABLED)
4274 return UMC;
4275 else
4276 return MC_NONE;
4277}
4278
/* On BE2/BE3 FW does not suggest the supported limits, so fill in *res
 * with driver-imposed resource limits, based on chip type, SRIOV state,
 * multi-channel mode and the function capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PF gets the larger unicast-MAC table */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV physical function */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4350
Sathya Perla30128032011-11-10 19:17:57 +00004351static void be_setup_init(struct be_adapter *adapter)
4352{
4353 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004354 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004355 adapter->if_handle = -1;
4356 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004357 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304358 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004359 if (be_physfn(adapter))
4360 adapter->cmd_privileges = MAX_PRIVILEGES;
4361 else
4362 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004363}
4364
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Read port-wide NIC-PF count and total max-VFs from the FW profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Tables left over after each NIC PF takes one */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4389
/* Read the PF-pool resource limits (adapter->pool_res) and SRIOV state
 * from FW, working around old BE3 FW that doesn't report max_vfs and
 * drivers unloaded with VFs still enabled. On Skyhawk (with no
 * pre-existing VFs) also computes the PF-pool RSS-table share.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4428
/* Discover SRIOV configuration and, on Skyhawk with no pre-existing VFs,
 * claim the whole PF-pool for the PF by programming a zero-VF resource
 * distribution into FW. Failure to optimize is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the max VF count to the PCI core (sysfs sriov_totalvfs) */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4454
/* Populate adapter->res with this function's resource limits (from FW on
 * Lancer/Skyhawk, from driver tables on BE2/BE3), carve out EQs for RoCE,
 * decide whether a separate default (non-RSS) RXQ is needed, and size the
 * initial RX/TX IRQ counts. Returns 0 or a FW command status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4504
/* Query one-time configuration from FW: controller attributes, FW config,
 * FAT dump length (non-Lancer PFs), FW log level (BEx), ACPI WoL
 * capability (and corresponding PCI wake state), port name, and the
 * active profile id (PFs only). Returns 0 or a FW command status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	/* On BEx, the FW log level drives the default msg_enable mask */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4542
Sathya Perla95046b92013-07-23 15:25:02 +05304543static int be_mac_setup(struct be_adapter *adapter)
4544{
4545 u8 mac[ETH_ALEN];
4546 int status;
4547
4548 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4549 status = be_cmd_get_perm_mac(adapter, mac);
4550 if (status)
4551 return status;
4552
4553 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4554 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304555 }
4556
Sathya Perla95046b92013-07-23 15:25:02 +05304557 return 0;
4558}
4559
/* Queue the periodic worker (1 second period) on the driver workqueue
 * and record in adapter->flags that it has been scheduled.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4565
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304566static void be_destroy_err_recovery_workq(void)
4567{
4568 if (!be_err_recovery_workq)
4569 return;
4570
4571 flush_workqueue(be_err_recovery_workq);
4572 destroy_workqueue(be_err_recovery_workq);
4573 be_err_recovery_workq = NULL;
4574}
4575
/* Schedule the error-detection task on the dedicated recovery workqueue
 * after @delay msecs and flag it as scheduled. Silently does nothing if
 * the recovery workqueue was never created (or already destroyed).
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4587
/* Create all HW queues in order (EQs, TXQs, RX CQs, MCC queues) and
 * publish the actual RX/TX queue counts to the network stack. On any
 * failure, logs the error and returns the failing status; the caller is
 * responsible for tearing down whatever was created.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the stack how many RX/TX queues are actually usable */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4622
Ajit Khaparde62219062016-02-10 22:45:53 +05304623static int be_if_create(struct be_adapter *adapter)
4624{
4625 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4626 u32 cap_flags = be_if_cap_flags(adapter);
4627 int status;
4628
Sathya Perlab7172412016-07-27 05:26:18 -04004629 /* alloc required memory for other filtering fields */
4630 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4631 sizeof(*adapter->pmac_id), GFP_KERNEL);
4632 if (!adapter->pmac_id)
4633 return -ENOMEM;
4634
4635 adapter->mc_list = kcalloc(be_max_mc(adapter),
4636 sizeof(*adapter->mc_list), GFP_KERNEL);
4637 if (!adapter->mc_list)
4638 return -ENOMEM;
4639
4640 adapter->uc_list = kcalloc(be_max_uc(adapter),
4641 sizeof(*adapter->uc_list), GFP_KERNEL);
4642 if (!adapter->uc_list)
4643 return -ENOMEM;
4644
Sathya Perlae2617682016-06-22 08:54:54 -04004645 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304646 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4647
4648 en_flags &= cap_flags;
4649 /* will enable all the needed filter flags in be_open() */
4650 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4651 &adapter->if_handle, 0);
4652
Sathya Perlab7172412016-07-27 05:26:18 -04004653 if (status)
4654 return status;
4655
4656 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304657}
4658
/* Re-create the iface and all queues (e.g. after a change in queue
 * counts): close the device if it is running, cancel the worker, tear
 * down queues and the iface (and MSI-X vectors, unless they are shared
 * with RoCE), then re-create everything and re-open. Returns 0 or the
 * first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4701
/* Parse the major version number out of a FW version string of the form
 * "major.minor...". Returns 0 when the string does not begin with a
 * parseable integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4712
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304713/* If it is error recovery, FLR the PF
4714 * Else if any VFs are already enabled don't FLR the PF
4715 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004716static bool be_reset_required(struct be_adapter *adapter)
4717{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304718 if (be_error_recovering(adapter))
4719 return true;
4720 else
4721 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004722}
4723
4724/* Wait for the FW to be ready and perform the required initialization */
4725static int be_func_init(struct be_adapter *adapter)
4726{
4727 int status;
4728
4729 status = be_fw_wait_ready(adapter);
4730 if (status)
4731 return status;
4732
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304733 /* FW is now ready; clear errors to allow cmds/doorbell */
4734 be_clear_error(adapter, BE_CLEAR_ALL);
4735
Sathya Perlaf962f842015-02-23 04:20:16 -05004736 if (be_reset_required(adapter)) {
4737 status = be_cmd_reset_function(adapter);
4738 if (status)
4739 return status;
4740
4741 /* Wait for interrupts to quiesce after an FLR */
4742 msleep(100);
Sathya Perlaf962f842015-02-23 04:20:16 -05004743 }
4744
4745 /* Tell FW we're ready to fire cmds */
4746 status = be_cmd_fw_init(adapter);
4747 if (status)
4748 return status;
4749
4750 /* Allow interrupts for other ULPs running on NIC function */
4751 be_intr_set(adapter, true);
4752
4753 return 0;
4754}
4755
Sathya Perla5fb379e2009-06-18 00:02:59 +00004756static int be_setup(struct be_adapter *adapter)
4757{
Sathya Perla39f1d942012-05-08 19:41:24 +00004758 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004759 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004760
Sathya Perlaf962f842015-02-23 04:20:16 -05004761 status = be_func_init(adapter);
4762 if (status)
4763 return status;
4764
Sathya Perla30128032011-11-10 19:17:57 +00004765 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004766
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004767 if (!lancer_chip(adapter))
4768 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004769
Suresh Reddy980df242015-12-30 01:29:03 -05004770 /* invoke this cmd first to get pf_num and vf_num which are needed
4771 * for issuing profile related cmds
4772 */
4773 if (!BEx_chip(adapter)) {
4774 status = be_cmd_get_func_config(adapter, NULL);
4775 if (status)
4776 return status;
4777 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004778
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004779 status = be_get_config(adapter);
4780 if (status)
4781 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004782
Somnath Koturde2b1e02016-06-06 07:22:10 -04004783 if (!BE2_chip(adapter) && be_physfn(adapter))
4784 be_alloc_sriov_res(adapter);
4785
4786 status = be_get_resources(adapter);
4787 if (status)
4788 goto err;
4789
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004790 status = be_msix_enable(adapter);
4791 if (status)
4792 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004793
Kalesh APbcc84142015-08-05 03:27:48 -04004794 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304795 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004796 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004797 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004798
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304799 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4800 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304801 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304802 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004803 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004804 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004805
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004806 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004807
Sathya Perla95046b92013-07-23 15:25:02 +05304808 status = be_mac_setup(adapter);
4809 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004810 goto err;
4811
Kalesh APe97e3cd2014-07-17 16:20:26 +05304812 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304813 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004814
Somnath Koture9e2a902013-10-24 14:37:53 +05304815 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304816 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304817 adapter->fw_ver);
4818 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4819 }
4820
Kalesh AP00d594c2015-01-20 03:51:44 -05004821 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4822 adapter->rx_fc);
4823 if (status)
4824 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4825 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004826
Kalesh AP00d594c2015-01-20 03:51:44 -05004827 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4828 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004829
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304830 if (be_physfn(adapter))
4831 be_cmd_set_logical_link_config(adapter,
4832 IFLA_VF_LINK_STATE_AUTO, 0);
4833
Somnath Kotur884476b2016-06-22 08:54:55 -04004834 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4835 * confusing a linux bridge or OVS that it might be connected to.
4836 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4837 * when SRIOV is not enabled.
4838 */
4839 if (BE3_chip(adapter))
4840 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4841 PORT_FWD_TYPE_PASSTHRU, 0);
4842
Vasundhara Volambec84e62014-06-30 13:01:32 +05304843 if (adapter->num_vfs)
4844 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004845
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004846 status = be_cmd_get_phy_info(adapter);
4847 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004848 adapter->phy.fc_autoneg = 1;
4849
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304850 if (be_physfn(adapter) && !lancer_chip(adapter))
4851 be_cmd_set_features(adapter);
4852
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304853 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304854 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004855 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004856err:
4857 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004858 return status;
4859}
4860
Ivan Vecera66268732011-12-08 01:31:21 +00004861#ifdef CONFIG_NET_POLL_CONTROLLER
4862static void be_netpoll(struct net_device *netdev)
4863{
4864 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004865 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004866 int i;
4867
Sathya Perlae49cc342012-11-27 19:50:02 +00004868 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004869 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004870 napi_schedule(&eqo->napi);
4871 }
Ivan Vecera66268732011-12-08 01:31:21 +00004872}
4873#endif
4874
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004875int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4876{
4877 const struct firmware *fw;
4878 int status;
4879
4880 if (!netif_running(adapter->netdev)) {
4881 dev_err(&adapter->pdev->dev,
4882 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304883 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004884 }
4885
4886 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4887 if (status)
4888 goto fw_exit;
4889
4890 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4891
4892 if (lancer_chip(adapter))
4893 status = lancer_fw_download(adapter, fw);
4894 else
4895 status = be_fw_download(adapter, fw);
4896
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004897 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304898 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004899
Ajit Khaparde84517482009-09-04 03:12:16 +00004900fw_exit:
4901 release_firmware(fw);
4902 return status;
4903}
4904
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004905static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4906 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004907{
4908 struct be_adapter *adapter = netdev_priv(dev);
4909 struct nlattr *attr, *br_spec;
4910 int rem;
4911 int status = 0;
4912 u16 mode = 0;
4913
4914 if (!sriov_enabled(adapter))
4915 return -EOPNOTSUPP;
4916
4917 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004918 if (!br_spec)
4919 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004920
4921 nla_for_each_nested(attr, br_spec, rem) {
4922 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4923 continue;
4924
Thomas Grafb7c1a312014-11-26 13:42:17 +01004925 if (nla_len(attr) < sizeof(mode))
4926 return -EINVAL;
4927
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004928 mode = nla_get_u16(attr);
Suresh Reddyac0f5fb2015-12-30 01:28:57 -05004929 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4930 return -EOPNOTSUPP;
4931
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004932 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4933 return -EINVAL;
4934
4935 status = be_cmd_set_hsw_config(adapter, 0, 0,
4936 adapter->if_handle,
4937 mode == BRIDGE_MODE_VEPA ?
4938 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004939 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004940 if (status)
4941 goto err;
4942
4943 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4944 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4945
4946 return status;
4947 }
4948err:
4949 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4950 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4951
4952 return status;
4953}
4954
4955static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004956 struct net_device *dev, u32 filter_mask,
4957 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004958{
4959 struct be_adapter *adapter = netdev_priv(dev);
4960 int status = 0;
4961 u8 hsw_mode;
4962
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004963 /* BE and Lancer chips support VEB mode only */
4964 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Ivan Vecera84317062016-02-11 12:42:26 +01004965 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4966 if (!pci_sriov_get_totalvfs(adapter->pdev))
4967 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004968 hsw_mode = PORT_FWD_TYPE_VEB;
4969 } else {
4970 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004971 adapter->if_handle, &hsw_mode,
4972 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004973 if (status)
4974 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004975
4976 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4977 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004978 }
4979
4980 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4981 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004982 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004983 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004984}
4985
Sathya Perlab7172412016-07-27 05:26:18 -04004986static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4987 void (*func)(struct work_struct *))
4988{
4989 struct be_cmd_work *work;
4990
4991 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4992 if (!work) {
4993 dev_err(&adapter->pdev->dev,
4994 "be_work memory allocation failed\n");
4995 return NULL;
4996 }
4997
4998 INIT_WORK(&work->work, func);
4999 work->adapter = adapter;
5000 return work;
5001}
5002
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005003/* VxLAN offload Notes:
5004 *
5005 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5006 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5007 * is expected to work across all types of IP tunnels once exported. Skyhawk
5008 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305009 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5010 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5011 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005012 *
5013 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5014 * adds more than one port, disable offloads and don't re-enable them again
5015 * until after all the tunnels are removed.
5016 */
Sathya Perlab7172412016-07-27 05:26:18 -04005017static void be_work_add_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305018{
Sathya Perlab7172412016-07-27 05:26:18 -04005019 struct be_cmd_work *cmd_work =
5020 container_of(work, struct be_cmd_work, work);
5021 struct be_adapter *adapter = cmd_work->adapter;
5022 struct net_device *netdev = adapter->netdev;
Sathya Perlac9c47142014-03-27 10:46:19 +05305023 struct device *dev = &adapter->pdev->dev;
Sathya Perlab7172412016-07-27 05:26:18 -04005024 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305025 int status;
5026
Jiri Benc1e5b3112015-09-17 16:11:13 +02005027 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5028 adapter->vxlan_port_aliases++;
Sathya Perlab7172412016-07-27 05:26:18 -04005029 goto done;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005030 }
5031
Sathya Perlac9c47142014-03-27 10:46:19 +05305032 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05305033 dev_info(dev,
5034 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005035 dev_info(dev, "Disabling VxLAN offloads\n");
5036 adapter->vxlan_port_count++;
5037 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05305038 }
5039
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005040 if (adapter->vxlan_port_count++ >= 1)
Sathya Perlab7172412016-07-27 05:26:18 -04005041 goto done;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005042
Sathya Perlac9c47142014-03-27 10:46:19 +05305043 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5044 OP_CONVERT_NORMAL_TO_TUNNEL);
5045 if (status) {
5046 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5047 goto err;
5048 }
5049
5050 status = be_cmd_set_vxlan_port(adapter, port);
5051 if (status) {
5052 dev_warn(dev, "Failed to add VxLAN port\n");
5053 goto err;
5054 }
5055 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5056 adapter->vxlan_port = port;
5057
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005058 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5059 NETIF_F_TSO | NETIF_F_TSO6 |
5060 NETIF_F_GSO_UDP_TUNNEL;
5061 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05305062 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005063
Sathya Perlac9c47142014-03-27 10:46:19 +05305064 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5065 be16_to_cpu(port));
Sathya Perlab7172412016-07-27 05:26:18 -04005066 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305067err:
5068 be_disable_vxlan_offloads(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04005069done:
5070 kfree(cmd_work);
Sathya Perlac9c47142014-03-27 10:46:19 +05305071}
5072
Sathya Perlab7172412016-07-27 05:26:18 -04005073static void be_work_del_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305074{
Sathya Perlab7172412016-07-27 05:26:18 -04005075 struct be_cmd_work *cmd_work =
5076 container_of(work, struct be_cmd_work, work);
5077 struct be_adapter *adapter = cmd_work->adapter;
5078 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305079
5080 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005081 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305082
Jiri Benc1e5b3112015-09-17 16:11:13 +02005083 if (adapter->vxlan_port_aliases) {
5084 adapter->vxlan_port_aliases--;
Sathya Perlab7172412016-07-27 05:26:18 -04005085 goto out;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005086 }
5087
Sathya Perlac9c47142014-03-27 10:46:19 +05305088 be_disable_vxlan_offloads(adapter);
5089
5090 dev_info(&adapter->pdev->dev,
5091 "Disabled VxLAN offloads for UDP port %d\n",
5092 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005093done:
5094 adapter->vxlan_port_count--;
Sathya Perlab7172412016-07-27 05:26:18 -04005095out:
5096 kfree(cmd_work);
5097}
5098
5099static void be_cfg_vxlan_port(struct net_device *netdev,
5100 struct udp_tunnel_info *ti,
5101 void (*func)(struct work_struct *))
5102{
5103 struct be_adapter *adapter = netdev_priv(netdev);
5104 struct be_cmd_work *cmd_work;
5105
5106 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5107 return;
5108
5109 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5110 return;
5111
5112 cmd_work = be_alloc_work(adapter, func);
5113 if (cmd_work) {
5114 cmd_work->info.vxlan_port = ti->port;
5115 queue_work(be_wq, &cmd_work->work);
5116 }
5117}
5118
/* ndo_udp_tunnel_del: schedule removal of a VxLAN UDP port offload */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5124
/* ndo_udp_tunnel_add: schedule addition of a VxLAN UDP port offload */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005130
Jesse Gross5f352272014-12-23 22:37:26 -08005131static netdev_features_t be_features_check(struct sk_buff *skb,
5132 struct net_device *dev,
5133 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005134{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305135 struct be_adapter *adapter = netdev_priv(dev);
5136 u8 l4_hdr = 0;
5137
5138 /* The code below restricts offload features for some tunneled packets.
5139 * Offload features for normal (non tunnel) packets are unchanged.
5140 */
5141 if (!skb->encapsulation ||
5142 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5143 return features;
5144
5145 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5146 * should disable tunnel offload features if it's not a VxLAN packet,
5147 * as tunnel offloads have been enabled only for VxLAN. This is done to
5148 * allow other tunneled traffic like GRE work fine while VxLAN
5149 * offloads are configured in Skyhawk-R.
5150 */
5151 switch (vlan_get_protocol(skb)) {
5152 case htons(ETH_P_IP):
5153 l4_hdr = ip_hdr(skb)->protocol;
5154 break;
5155 case htons(ETH_P_IPV6):
5156 l4_hdr = ipv6_hdr(skb)->nexthdr;
5157 break;
5158 default:
5159 return features;
5160 }
5161
5162 if (l4_hdr != IPPROTO_UDP ||
5163 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5164 skb->inner_protocol != htons(ETH_P_TEB) ||
5165 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5166 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08005167 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305168
5169 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005170}
Sathya Perlac9c47142014-03-27 10:46:19 +05305171
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305172static int be_get_phys_port_id(struct net_device *dev,
5173 struct netdev_phys_item_id *ppid)
5174{
5175 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5176 struct be_adapter *adapter = netdev_priv(dev);
5177 u8 *id;
5178
5179 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5180 return -ENOSPC;
5181
5182 ppid->id[0] = adapter->hba_port_num + 1;
5183 id = &ppid->id[1];
5184 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5185 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5186 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5187
5188 ppid->id_len = id_len;
5189
5190 return 0;
5191}
5192
Sathya Perlab7172412016-07-27 05:26:18 -04005193static void be_set_rx_mode(struct net_device *dev)
5194{
5195 struct be_adapter *adapter = netdev_priv(dev);
5196 struct be_cmd_work *work;
5197
5198 work = be_alloc_work(adapter, be_work_set_rx_mode);
5199 if (work)
5200 queue_work(be_wq, &work->work);
5201}
5202
stephen hemmingere5686ad2012-01-05 19:10:25 +00005203static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005204 .ndo_open = be_open,
5205 .ndo_stop = be_close,
5206 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005207 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005208 .ndo_set_mac_address = be_mac_addr_set,
5209 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005210 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005211 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005212 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5213 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005214 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005215 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005216 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005217 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305218 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005219 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005220#ifdef CONFIG_NET_POLL_CONTROLLER
5221 .ndo_poll_controller = be_netpoll,
5222#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005223 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5224 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305225#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305226 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305227#endif
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07005228 .ndo_udp_tunnel_add = be_add_vxlan_port,
5229 .ndo_udp_tunnel_del = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005230 .ndo_features_check = be_features_check,
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305231 .ndo_get_phys_port_id = be_get_phys_port_id,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005232};
5233
5234static void be_netdev_init(struct net_device *netdev)
5235{
5236 struct be_adapter *adapter = netdev_priv(netdev);
5237
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005238 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005239 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00005240 NETIF_F_HW_VLAN_CTAG_TX;
Ajit Khaparde62219062016-02-10 22:45:53 +05305241 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005242 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005243
5244 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00005245 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00005246
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07005247 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00005248 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00005249
Ajit Khapardefbc13f02012-03-18 06:23:21 +00005250 netdev->priv_flags |= IFF_UNICAST_FLT;
5251
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005252 netdev->flags |= IFF_MULTICAST;
5253
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05305254 netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00005255
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005256 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005257
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00005258 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005259}
5260
Kalesh AP87ac1a52015-02-23 04:20:15 -05005261static void be_cleanup(struct be_adapter *adapter)
5262{
5263 struct net_device *netdev = adapter->netdev;
5264
5265 rtnl_lock();
5266 netif_device_detach(netdev);
5267 if (netif_running(netdev))
5268 be_close(netdev);
5269 rtnl_unlock();
5270
5271 be_clear(adapter);
5272}
5273
Kalesh AP484d76f2015-02-23 04:20:14 -05005274static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005275{
Kalesh APd0e1b312015-02-23 04:20:12 -05005276 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005277 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005278
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005279 status = be_setup(adapter);
5280 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005281 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005282
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005283 rtnl_lock();
5284 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005285 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005286 rtnl_unlock();
5287
5288 if (status)
5289 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005290
Kalesh APd0e1b312015-02-23 04:20:12 -05005291 netif_device_attach(netdev);
5292
Kalesh AP484d76f2015-02-23 04:20:14 -05005293 return 0;
5294}
5295
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305296static void be_soft_reset(struct be_adapter *adapter)
5297{
5298 u32 val;
5299
5300 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5301 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5302 val |= SLIPORT_SOFTRESET_SR_MASK;
5303 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5304}
5305
/* Check the POST stage register and the recovery history to decide
 * whether the current error may be recovered from:
 *  - HW must report a recoverable-error POST stage with a nonzero code
 *  - not within the initial idle window after driver load
 *  - not too soon after the previous recovery
 *  - not the same error code twice in a row (consecutive TPE)
 * When recovery is allowed, the time and code of this attempt are
 * recorded for the next check.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5351
/* One step of the TPE recovery state machine for non-Lancer chips.
 * Each call advances err_rec->recovery_state and sets
 * err_rec->resched_delay as the interval after which the caller
 * (be_err_detection_task) should invoke it again.
 * Returns -EAGAIN while recovery is still in progress, 0 when the chip
 * is ready to be re-initialized, or a fatal negative error (in which
 * case resched_delay is set to 0).
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		/* start detection: re-check after the detect duration */
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* other PFs skip the reset and wait until polling time */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0; /* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5422
Kalesh AP484d76f2015-02-23 04:20:14 -05005423static int be_err_recover(struct be_adapter *adapter)
5424{
Kalesh AP484d76f2015-02-23 04:20:14 -05005425 int status;
5426
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305427 if (!lancer_chip(adapter)) {
5428 if (!adapter->error_recovery.recovery_supported ||
5429 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5430 return -EIO;
5431 status = be_tpe_recover(adapter);
5432 if (status)
5433 goto err;
5434 }
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305435
5436 /* Wait for adapter to reach quiescent state before
5437 * destroying queues
5438 */
5439 status = be_fw_wait_ready(adapter);
5440 if (status)
5441 goto err;
5442
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305443 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5444
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305445 be_cleanup(adapter);
5446
Kalesh AP484d76f2015-02-23 04:20:14 -05005447 status = be_resume(adapter);
5448 if (status)
5449 goto err;
5450
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305451 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5452
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005453err:
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005454 return status;
5455}
5456
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005457static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005458{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305459 struct be_error_recovery *err_rec =
5460 container_of(work, struct be_error_recovery,
5461 err_detection_work.work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005462 struct be_adapter *adapter =
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305463 container_of(err_rec, struct be_adapter,
5464 error_recovery);
5465 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305466 struct device *dev = &adapter->pdev->dev;
5467 int recovery_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005468
5469 be_detect_error(adapter);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305470 if (!be_check_error(adapter, BE_ERROR_HW))
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305471 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005472
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305473 recovery_status = be_err_recover(adapter);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305474 if (!recovery_status) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305475 err_rec->recovery_retries = 0;
5476 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305477 dev_info(dev, "Adapter recovery successful\n");
5478 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305479 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5480 /* BEx/SH recovery state machine */
5481 if (adapter->pf_num == 0 &&
5482 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5483 dev_err(&adapter->pdev->dev,
5484 "Adapter recovery in progress\n");
5485 resched_delay = err_rec->resched_delay;
5486 goto reschedule_task;
5487 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305488 /* For VFs, check if PF have allocated resources
5489 * every second.
5490 */
5491 dev_err(dev, "Re-trying adapter recovery\n");
5492 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305493 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5494 ERR_RECOVERY_MAX_RETRY_COUNT) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305495 /* In case of another error during recovery, it takes 30 sec
5496 * for adapter to come out of error. Retry error recovery after
5497 * this time interval.
5498 */
5499 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305500 resched_delay = ERR_RECOVERY_RETRY_DELAY;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305501 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305502 } else {
5503 dev_err(dev, "Adapter recovery failed\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305504 dev_err(dev, "Please reboot server to recover\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005505 }
5506
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305507 return;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305508
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305509reschedule_task:
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305510 be_schedule_err_detection(adapter, resched_delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005511}
5512
Vasundhara Volam21252372015-02-06 08:18:42 -05005513static void be_log_sfp_info(struct be_adapter *adapter)
5514{
5515 int status;
5516
5517 status = be_cmd_query_sfp_info(adapter);
5518 if (!status) {
5519 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305520 "Port %c: %s Vendor: %s part no: %s",
5521 adapter->port_name,
5522 be_misconfig_evt_port_state[adapter->phy_state],
5523 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005524 adapter->phy.vendor_pn);
5525 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305526 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005527}
5528
/* Periodic (1 s) housekeeping work: die-temperature query, MCC completion
 * reaping while the interface is down, stats refresh, RX replenish for
 * starved queues, EQ-delay update and deferred SFP logging.  Always
 * requeues itself on be_wq at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* be_get_temp_freq must be a power of 2 (see be_drv_init) for
	 * MODULO to be valid.
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats request when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	/* PHY misconfiguration was flagged from event context; log it here */
	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5577
Sathya Perla78fad34e2015-02-23 04:20:08 -05005578static void be_unmap_pci_bars(struct be_adapter *adapter)
5579{
5580 if (adapter->csr)
5581 pci_iounmap(adapter->pdev, adapter->csr);
5582 if (adapter->db)
5583 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005584 if (adapter->pcicfg && adapter->pcicfg_mapped)
5585 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005586}
5587
/* Return the PCI BAR number holding the doorbell registers:
 * BAR 0 on Lancer chips and on all VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5595
5596static int be_roce_map_pci_bars(struct be_adapter *adapter)
5597{
5598 if (skyhawk_chip(adapter)) {
5599 adapter->roce_db.size = 4096;
5600 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5601 db_bar(adapter));
5602 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5603 db_bar(adapter));
5604 }
5605 return 0;
5606}
5607
/* Map the PCI BARs used by the driver (CSR, doorbell, PCICFG) and record
 * the SLI family / VF status read from config space.  On failure all
 * mappings created so far are released.  Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (2) exists only on BEx PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			/* pcicfg_mapped tells be_unmap_pci_bars() whether
			 * this pointer owns a mapping of its own.
			 */
			adapter->pcicfg_mapped = true;
		} else {
			/* VF: pcicfg aliases into the db mapping */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5652
5653static void be_drv_cleanup(struct be_adapter *adapter)
5654{
5655 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5656 struct device *dev = &adapter->pdev->dev;
5657
5658 if (mem->va)
5659 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5660
5661 mem = &adapter->rx_filter;
5662 if (mem->va)
5663 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5664
5665 mem = &adapter->stats_cmd;
5666 if (mem->va)
5667 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5668}
5669
/* Allocate and initialize various fields in be_adapter struct.
 *
 * Allocates the DMA-coherent mailbox (16-byte aligned within an
 * over-allocated buffer), RX-filter and stats command buffers; sizes the
 * stats buffer per chip generation; initializes locks, completion,
 * delayed work items and error-recovery state.  Uses goto-based unwind
 * on allocation failure.  Returns 0 or -ENOMEM.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* +16 so the mailbox can be aligned to a 16-byte boundary below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5744
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe().  The ordering is deliberate (RoCE first, netdev before
 * be_clear(), FW cleanup before unmapping BARs).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Skip the function reset while VFs are still assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5777
Arnd Bergmann9a032592015-05-18 23:06:45 +02005778static ssize_t be_hwmon_show_temp(struct device *dev,
5779 struct device_attribute *dev_attr,
5780 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305781{
5782 struct be_adapter *adapter = dev_get_drvdata(dev);
5783
5784 /* Unit: millidegree Celsius */
5785 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5786 return -EIO;
5787 else
5788 return sprintf(buf, "%u\n",
5789 adapter->hwmon_info.be_on_die_temp * 1000);
5790}
5791
/* hwmon attribute plumbing: a single read-only temp1_input sensor backed
 * by be_hwmon_show_temp(), grouped for registration via
 * devm_hwmon_device_register_with_groups() in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5801
Sathya Perlad3791422012-09-28 04:39:44 +00005802static char *mc_name(struct be_adapter *adapter)
5803{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305804 char *str = ""; /* default */
5805
5806 switch (adapter->mc_type) {
5807 case UMC:
5808 str = "UMC";
5809 break;
5810 case FLEX10:
5811 str = "FLEX10";
5812 break;
5813 case vNIC1:
5814 str = "vNIC-1";
5815 break;
5816 case nPAR:
5817 str = "nPAR";
5818 break;
5819 case UFP:
5820 str = "UFP";
5821 break;
5822 case vNIC2:
5823 str = "vNIC-2";
5824 break;
5825 default:
5826 str = "";
5827 }
5828
5829 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005830}
5831
/* "PF" for the physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5836
/* Map a PCI device ID to the marketing name string used in log
 * messages; BE_NAME for any unlisted device ID.
 */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
5856
/* PCI probe callback: enable and claim the device, allocate the netdev
 * and adapter, set the DMA mask, map BARs, allocate driver resources
 * (be_drv_init), bring the adapter up (be_setup), register the netdev,
 * then start RoCE, error detection and (PF-only) hwmon.  Each failure
 * unwinds exactly what was set up before it via the goto ladder.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if not supported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* Best-effort: AER may be unavailable; probe continues either way */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	/* Recorded for the error-recovery state machine */
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5953
/* PCI PM suspend callback: quiesce interrupts and error detection,
 * tear the adapter down (be_cleanup), then save config space and put
 * the device in the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5968
Kalesh AP484d76f2015-02-23 04:20:14 -05005969static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005970{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005971 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005972 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005973
5974 status = pci_enable_device(pdev);
5975 if (status)
5976 return status;
5977
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005978 pci_restore_state(pdev);
5979
Kalesh AP484d76f2015-02-23 04:20:14 -05005980 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005981 if (status)
5982 return status;
5983
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305984 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005985
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005986 return 0;
5987}
5988
/*
 * PCI shutdown callback.  An FLR will stop BE from DMAing any data:
 * stop RoCE and all driver work, detach the netdev, reset the function
 * and disable the device so no DMA is in flight across reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6009
/* EEH error_detected callback: first pass marks BE_ERROR_EEH, stops
 * error detection and cleans up; a permanent failure requests
 * disconnect, otherwise the device is disabled and a slot reset is
 * requested.  May sleep up to 30 s waiting out a FW flash dump.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Only do the teardown once, even if called repeatedly */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6043
/* EEH slot_reset callback: re-enable the device after a slot reset,
 * wait for FW readiness, clear AER status and all driver error flags.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6069
/* EEH resume callback: save config space, bring the adapter back up
 * (be_resume), re-add RoCE and restart error detection.  On failure
 * only a log message is emitted; the device stays down.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6090
/* sriov_configure PCI callback: enable num_vfs VFs, or disable them all
 * when num_vfs == 0.  On Skyhawk, PF-pool resources are redistributed
 * across the requested VF count before queues are updated.  Returns the
 * number of VFs enabled on success, or a negative errno.
 *
 * NOTE(review): be_vf_clear() runs before the pci_vfs_assigned() -EBUSY
 * check below — presumably be_vf_clear() itself is safe while VFs are
 * assigned; verify against its implementation.
 * NOTE(review): when be_vf_setup() fails, the function returns 0 rather
 * than an error code — confirm callers rely on this.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6145
/* PCI EEH (AER) recovery callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6151
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006152static struct pci_driver be_driver = {
6153 .name = DRV_NAME,
6154 .id_table = be_dev_ids,
6155 .probe = be_probe,
6156 .remove = be_remove,
6157 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05006158 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00006159 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05006160 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00006161 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006162};
6163
6164static int __init be_init_module(void)
6165{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306166 int status;
6167
Joe Perches8e95a202009-12-03 07:58:21 +00006168 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6169 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006170 printk(KERN_WARNING DRV_NAME
6171 " : Module param rx_frag_size must be 2048/4096/8192."
6172 " Using 2048\n");
6173 rx_frag_size = 2048;
6174 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006175
Vasundhara Volamace40af2015-03-04 00:44:34 -05006176 if (num_vfs > 0) {
6177 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6178 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6179 }
6180
Sathya Perlab7172412016-07-27 05:26:18 -04006181 be_wq = create_singlethread_workqueue("be_wq");
6182 if (!be_wq) {
6183 pr_warn(DRV_NAME "workqueue creation failed\n");
6184 return -1;
6185 }
6186
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306187 be_err_recovery_workq =
6188 create_singlethread_workqueue("be_err_recover");
6189 if (!be_err_recovery_workq)
6190 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6191
6192 status = pci_register_driver(&be_driver);
6193 if (status) {
6194 destroy_workqueue(be_wq);
6195 be_destroy_err_recovery_workq();
6196 }
6197 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006198}
6199module_init(be_init_module);
6200
/* Module exit: unregister the PCI driver, then tear down the work
 * queues created in be_init_module().  be_err_recovery_workq may have
 * failed to create, hence the dedicated destroy helper; be_wq is
 * checked for NULL before destruction.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);