blob: a1c9920b2452467b6a9e85675c5475ba2fd9d569 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
47struct workqueue_struct *be_err_recovery_workq;
48
Benoit Taine9baa3c32014-08-08 15:56:03 +020049static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070050 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070051 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070052 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
53 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000054 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000055 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000056 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000057 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070058 { 0 }
59};
60MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
63struct workqueue_struct *be_wq;
64
Ajit Khaparde7c185272010-07-29 06:16:33 +000065/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070066static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000067 "CEV",
68 "CTX",
69 "DBUF",
70 "ERX",
71 "Host",
72 "MPU",
73 "NDMA",
74 "PTC ",
75 "RDMA ",
76 "RXF ",
77 "RXIPS ",
78 "RXULP0 ",
79 "RXULP1 ",
80 "RXULP2 ",
81 "TIM ",
82 "TPOST ",
83 "TPRE ",
84 "TXIPS ",
85 "TXULP0 ",
86 "TXULP1 ",
87 "UC ",
88 "WDMA ",
89 "TXULP2 ",
90 "HOST1 ",
91 "P0_OB_LINK ",
92 "P1_OB_LINK ",
93 "HOST_GPIO ",
94 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053095 "ERX2 ",
96 "SPARE ",
97 "JTAG ",
98 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000099};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -0700102static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +0000103 "LPCMEMHOST",
104 "MGMT_MAC",
105 "PCS0ONLINE",
106 "MPU_IRAM",
107 "PCS1ONLINE",
108 "PCTL0",
109 "PCTL1",
110 "PMEM",
111 "RR",
112 "TXPB",
113 "RXPP",
114 "XAUI",
115 "TXP",
116 "ARM",
117 "IPC",
118 "HOST2",
119 "HOST3",
120 "HOST4",
121 "HOST5",
122 "HOST6",
123 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530124 "ECRC",
125 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700126 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530127 "PERIPH",
128 "LLTXULP",
129 "D2P",
130 "RCON",
131 "LDMA",
132 "LLTXP",
133 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000134 "Unknown"
135};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500137#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
138 BE_IF_FLAGS_BROADCAST | \
139 BE_IF_FLAGS_MULTICAST | \
140 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
Somnath Kotur68c45a22013-03-14 02:42:07 +0000169static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170{
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530174 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
176
Sathya Perla5f0b8492009-07-27 22:52:56 +0000177 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000179 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000181 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000183
Sathya Perladb3ea782011-08-22 19:41:52 +0000184 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
Sathya Perla8788fdc2009-07-27 22:52:03 +0000204static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205{
206 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530207
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530208 if (be_check_error(adapter, BE_ERROR_HW))
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 val |= qid & DB_RQ_RING_ID_MASK;
212 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000213
214 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000218static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
219 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220{
221 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530222
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530223 if (be_check_error(adapter, BE_ERROR_HW))
224 return;
225
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000226 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000228
229 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000230 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400234 bool arm, bool clear_int, u16 num_popped,
235 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236{
237 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530238
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530240 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000241
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530242 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000243 return;
244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 if (arm)
246 val |= 1 << DB_EQ_REARM_SHIFT;
247 if (clear_int)
248 val |= 1 << DB_EQ_CLR_SHIFT;
249 val |= 1 << DB_EQ_EVNT_SHIFT;
250 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400251 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000252 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700253}
254
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256{
257 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530258
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700259 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000260 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
261 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000262
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530263 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000264 return;
265
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700266 if (arm)
267 val |= 1 << DB_CQ_REARM_SHIFT;
268 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000269 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700270}
271
Suresh Reddy988d44b2016-09-07 19:57:52 +0530272static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
273{
274 int i;
275
276 /* Check if mac has already been added as part of uc-list */
277 for (i = 0; i < adapter->uc_macs; i++) {
278 if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
279 mac)) {
280 /* mac already added, skip addition */
281 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
282 return 0;
283 }
284 }
285
286 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
287 &adapter->pmac_id[0], 0);
288}
289
290static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
291{
292 int i;
293
294 /* Skip deletion if the programmed mac is
295 * being used in uc-list
296 */
297 for (i = 0; i < adapter->uc_macs; i++) {
298 if (adapter->pmac_id[i + 1] == pmac_id)
299 return;
300 }
301 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
302}
303
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700304static int be_mac_addr_set(struct net_device *netdev, void *p)
305{
306 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530307 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700308 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530309 int status;
310 u8 mac[ETH_ALEN];
Suresh Reddy988d44b2016-09-07 19:57:52 +0530311 u32 old_pmac_id = adapter->pmac_id[0];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700312
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000313 if (!is_valid_ether_addr(addr->sa_data))
314 return -EADDRNOTAVAIL;
315
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530316 /* Proceed further only if, User provided MAC is different
317 * from active MAC
318 */
319 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
320 return 0;
321
Kalesh APbcc84142015-08-05 03:27:48 -0400322 /* if device is not running, copy MAC to netdev->dev_addr */
323 if (!netif_running(netdev))
324 goto done;
325
Sathya Perla5a712c12013-07-23 15:24:59 +0530326 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
327 * privilege or if PF did not provision the new MAC address.
328 * On BE3, this cmd will always fail if the VF doesn't have the
329 * FILTMGMT privilege. This failure is OK, only if the PF programmed
330 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000331 */
Suresh Reddy988d44b2016-09-07 19:57:52 +0530332 mutex_lock(&adapter->rx_filter_lock);
333 status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
Sathya Perla5a712c12013-07-23 15:24:59 +0530334 if (!status) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530335
336 /* Delete the old programmed MAC. This call may fail if the
337 * old MAC was already deleted by the PF driver.
338 */
339 if (adapter->pmac_id[0] != old_pmac_id)
Suresh Reddy988d44b2016-09-07 19:57:52 +0530340 be_dev_mac_del(adapter, old_pmac_id);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000341 }
342
Suresh Reddy988d44b2016-09-07 19:57:52 +0530343 mutex_unlock(&adapter->rx_filter_lock);
Sathya Perla5a712c12013-07-23 15:24:59 +0530344 /* Decide if the new MAC is successfully activated only after
345 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000346 */
Suresh Reddy988d44b2016-09-07 19:57:52 +0530347 status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
Suresh Reddyb188f092014-01-15 13:23:39 +0530348 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000349 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000350 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700351
Sathya Perla5a712c12013-07-23 15:24:59 +0530352 /* The MAC change did not happen, either due to lack of privilege
353 * or PF didn't pre-provision.
354 */
dingtianhong61d23e92013-12-30 15:40:43 +0800355 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530356 status = -EPERM;
357 goto err;
358 }
Kalesh APbcc84142015-08-05 03:27:48 -0400359done:
360 ether_addr_copy(netdev->dev_addr, addr->sa_data);
361 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000362 return 0;
363err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530364 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700365 return status;
366}
367
Sathya Perlaca34fe32012-11-06 17:48:56 +0000368/* BE2 supports only v0 cmd */
369static void *hw_stats_from_cmd(struct be_adapter *adapter)
370{
371 if (BE2_chip(adapter)) {
372 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
373
374 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500375 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000376 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
377
378 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500379 } else {
380 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
381
382 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000383 }
384}
385
386/* BE2 supports only v0 cmd */
387static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
388{
389 if (BE2_chip(adapter)) {
390 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
391
392 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500393 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
395
396 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500397 } else {
398 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
399
400 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000401 }
402}
403
404static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000406 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
407 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
408 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 &rxf_stats->port[adapter->port_num];
411 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412
Sathya Perlaac124ff2011-07-25 19:10:14 +0000413 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414 drvs->rx_pause_frames = port_stats->rx_pause_frames;
415 drvs->rx_crc_errors = port_stats->rx_crc_errors;
416 drvs->rx_control_frames = port_stats->rx_control_frames;
417 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
418 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
419 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
420 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
421 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
422 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
423 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
424 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
425 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
426 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
427 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->rx_dropped_header_too_small =
430 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000431 drvs->rx_address_filtered =
432 port_stats->rx_address_filtered +
433 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->rx_alignment_symbol_errors =
435 port_stats->rx_alignment_symbol_errors;
436
437 drvs->tx_pauseframes = port_stats->tx_pauseframes;
438 drvs->tx_controlframes = port_stats->tx_controlframes;
439
440 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000441 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000442 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000443 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000445 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->forwarded_packets = rxf_stats->forwarded_packets;
447 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000448 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
449 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000450 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
451}
452
Sathya Perlaca34fe32012-11-06 17:48:56 +0000453static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000454{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000455 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
456 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
457 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000458 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000459 &rxf_stats->port[adapter->port_num];
460 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461
Sathya Perlaac124ff2011-07-25 19:10:14 +0000462 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000463 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
464 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000465 drvs->rx_pause_frames = port_stats->rx_pause_frames;
466 drvs->rx_crc_errors = port_stats->rx_crc_errors;
467 drvs->rx_control_frames = port_stats->rx_control_frames;
468 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
469 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
470 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
471 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
472 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
473 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
474 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
475 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
476 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
477 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
478 drvs->rx_dropped_header_too_small =
479 port_stats->rx_dropped_header_too_small;
480 drvs->rx_input_fifo_overflow_drop =
481 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000482 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000483 drvs->rx_alignment_symbol_errors =
484 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000485 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000486 drvs->tx_pauseframes = port_stats->tx_pauseframes;
487 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000488 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489 drvs->jabber_events = port_stats->jabber_events;
490 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000491 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000492 drvs->forwarded_packets = rxf_stats->forwarded_packets;
493 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000494 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
495 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000496 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
497}
498
Ajit Khaparde61000862013-10-03 16:16:33 -0500499static void populate_be_v2_stats(struct be_adapter *adapter)
500{
501 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
502 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
503 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
504 struct be_port_rxf_stats_v2 *port_stats =
505 &rxf_stats->port[adapter->port_num];
506 struct be_drv_stats *drvs = &adapter->drv_stats;
507
508 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
509 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
510 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
511 drvs->rx_pause_frames = port_stats->rx_pause_frames;
512 drvs->rx_crc_errors = port_stats->rx_crc_errors;
513 drvs->rx_control_frames = port_stats->rx_control_frames;
514 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
515 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
516 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
517 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
518 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
519 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
520 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
521 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
522 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
523 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
524 drvs->rx_dropped_header_too_small =
525 port_stats->rx_dropped_header_too_small;
526 drvs->rx_input_fifo_overflow_drop =
527 port_stats->rx_input_fifo_overflow_drop;
528 drvs->rx_address_filtered = port_stats->rx_address_filtered;
529 drvs->rx_alignment_symbol_errors =
530 port_stats->rx_alignment_symbol_errors;
531 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
532 drvs->tx_pauseframes = port_stats->tx_pauseframes;
533 drvs->tx_controlframes = port_stats->tx_controlframes;
534 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
535 drvs->jabber_events = port_stats->jabber_events;
536 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
537 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
538 drvs->forwarded_packets = rxf_stats->forwarded_packets;
539 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
540 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
541 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
542 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530543 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500544 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
545 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
546 drvs->rx_roce_frames = port_stats->roce_frames_received;
547 drvs->roce_drops_crc = port_stats->roce_drops_crc;
548 drvs->roce_drops_payload_len =
549 port_stats->roce_drops_payload_len;
550 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500551}
552
Selvin Xavier005d5692011-05-16 07:36:35 +0000553static void populate_lancer_stats(struct be_adapter *adapter)
554{
Selvin Xavier005d5692011-05-16 07:36:35 +0000555 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530556 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000557
558 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
559 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
560 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
561 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000562 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000563 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000564 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
565 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
566 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
567 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
568 drvs->rx_dropped_tcp_length =
569 pport_stats->rx_dropped_invalid_tcp_length;
570 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
571 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
572 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
573 drvs->rx_dropped_header_too_small =
574 pport_stats->rx_dropped_header_too_small;
575 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000576 drvs->rx_address_filtered =
577 pport_stats->rx_address_filtered +
578 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000579 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000580 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000581 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
582 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000583 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000584 drvs->forwarded_packets = pport_stats->num_forwards_lo;
585 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000586 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000587 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000588}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000589
Sathya Perla09c1c682011-08-22 19:41:53 +0000590static void accumulate_16bit_val(u32 *acc, u16 val)
591{
592#define lo(x) (x & 0xFFFF)
593#define hi(x) (x & 0xFFFF0000)
594 bool wrapped = val < lo(*acc);
595 u32 newacc = hi(*acc) + val;
596
597 if (wrapped)
598 newacc += 65536;
599 ACCESS_ONCE(*acc) = newacc;
600}
601
Jingoo Han4188e7d2013-08-05 18:02:02 +0900602static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530603 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000604{
605 if (!BEx_chip(adapter))
606 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
607 else
608 /* below erx HW counter can actually wrap around after
609 * 65535. Driver accumulates a 32-bit value
610 */
611 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
612 (u16)erx_stat);
613}
614
/* Parse the FW stats response into driver stats, dispatching on chip family
 * (Lancer vs BE2/BE3/other), then update the per-RX-queue erx drop counters.
 */
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000615void be_parse_stats(struct be_adapter *adapter)
	616{
Ajit Khaparde61000862013-10-03 16:16:33 -0500617	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000618	struct be_rx_obj *rxo;
	619	int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000620	u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000621
Sathya Perlaca34fe32012-11-06 17:48:56 +0000622	if (lancer_chip(adapter)) {
	623		populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000624	} else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000625		if (BE2_chip(adapter))
	626			populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500627		else if (BE3_chip(adapter))
	628			/* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000629			populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500630		else
	631			populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000632
Ajit Khaparde61000862013-10-03 16:16:33 -0500633		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000634		for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000635			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
	636			populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000637		}
Sathya Perla09c1c682011-08-22 19:41:53 +0000638	}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000639}
640
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue 64-bit packet/byte
 * counters — each queue read under its u64_stats seqcount so a consistent
 * pkts/bytes pair is obtained on 32-bit hosts — then derive the rtnl error
 * fields from the FW-populated adapter->drv_stats.
 */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530642						struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700643{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644	struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000645	struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700646	struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000647	struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000648	u64 pkts, bytes;
	649	unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700650	int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651
Sathya Perla3abcded2010-10-03 22:12:27 -0700652	for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000653		const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530654
Sathya Perlaab1594e2011-07-25 19:10:15 +0000655		do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700656			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000657			pkts = rx_stats(rxo)->rx_pkts;
	658			bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700659		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000660		stats->rx_packets += pkts;
	661		stats->rx_bytes += bytes;
	662		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
	663		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
	664					rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700665	}
	666
Sathya Perla3c8def92011-06-12 20:01:58 +0000667	for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000668		const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530669
Sathya Perlaab1594e2011-07-25 19:10:15 +0000670		do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700671			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672			pkts = tx_stats(txo)->tx_pkts;
	673			bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700674		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000675		stats->tx_packets += pkts;
	676		stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000677	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678
	679	/* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000680	stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000681		drvs->rx_alignment_symbol_errors +
	682		drvs->rx_in_range_errors +
	683		drvs->rx_out_range_errors +
	684		drvs->rx_frame_too_long +
	685		drvs->rx_dropped_too_small +
	686		drvs->rx_dropped_too_short +
	687		drvs->rx_dropped_header_too_small +
	688		drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000689		drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691	/* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000692	stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000693		drvs->rx_out_range_errors +
	694		drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000695
Sathya Perlaab1594e2011-07-25 19:10:15 +0000696	stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697
	698	/* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000699	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000700
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700701	/* receiver fifo overrun */
	702	/* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000703	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000704				drvs->rx_input_fifo_overflow_drop +
	705				drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000706	return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700707}
708
/* Propagate a FW-reported link state change to the net stack. The first call
 * forces carrier off and latches BE_FLAGS_LINK_STATUS_INIT so the carrier
 * state starts from a known baseline before the real status is applied.
 */
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000709void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700710{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700711	struct net_device *netdev = adapter->netdev;
	712
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000713	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000714		netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000715		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700716	}
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000717
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530718	if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000719		netif_carrier_on(netdev);
	720	else
	721		netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200722
	723	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724}
725
/* Account one TX request against the queue's stats. gso_segs (or 1 for a
 * non-GSO skb) gives the on-wire packet count; VXLAN-offloaded packets are
 * counted separately. Writer side of the u64_stats seqcount.
 */
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500726static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700727{
Sathya Perla3c8def92011-06-12 20:01:58 +0000728	struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530729	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000730
Sathya Perlaab1594e2011-07-25 19:10:15 +0000731	u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000732	stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500733	stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530734	stats->tx_pkts += tx_pkts;
	735	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
	736		stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000737	u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700738}
739
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500740/* Returns number of WRBs needed for the skb */
	741static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500743	/* +1 for the header wrb */
	744	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700745}
	746
/* Populate one fragment WRB with the little-endian split 64-bit DMA address
 * and the fragment length (masked to the HW field width).
 */
	747static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
	748{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500749	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	750	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	751	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	752	wrb->rsvd0 = 0;
	753}
	754
	755/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
	756 * to avoid the swap and shift/mask operations in wrb_fill().
	757 */
	758static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
	759{
	760	wrb->frag_pa_hi = 0;
	761	wrb->frag_pa_lo = 0;
	762	wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000763	wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700764}
765
/* Return the VLAN TCI to put in the TX WRB: the skb's tag, with the PCP
 * bits replaced by the FW-recommended priority when the OS-supplied
 * priority is not in the adapter's allowed-priority bitmap.
 */
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000766static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530767				     struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000768{
	769	u8 vlan_prio;
	770	u16 vlan_tag;
	771
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100772	vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000773	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	774	/* If vlan priority provided by OS is NOT in available bmap */
	775	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
	776		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
	777				adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000778
	779	return vlan_tag;
	780}
781
Sathya Perlac9c47142014-03-27 10:46:19 +0530782/* Used only for IP tunnel packets */
	783static u16 skb_inner_ip_proto(struct sk_buff *skb)
	784{
	785	return (inner_ip_hdr(skb)->version == 4) ?
	786		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
	787}
	788
/* L4 protocol of the (outer) IP header, for v4 or v6 packets. */
	789static u16 skb_ip_proto(struct sk_buff *skb)
	790{
	791	return (ip_hdr(skb)->version == 4) ?
	792		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
	793}
	794
/* True when the queue cannot be guaranteed to hold one more max-fragment
 * packet; used to stop the netdev TX queue before overflow.
 */
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530795static inline bool be_is_txq_full(struct be_tx_obj *txo)
	796{
	797	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
	798}
	799
/* Wake the stopped TX queue only once it has drained to half capacity. */
	800static inline bool be_can_txq_wake(struct be_tx_obj *txo)
	801{
	802	return atomic_read(&txo->q.used) < txo->q.len / 2;
	803}
	804
/* True while WRBs beyond the not-yet-notified ones await completion. */
	805static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
	806{
	807	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
	808}
809
/* Derive the TX WRB feature flags (LSO/LSO6, IP/TCP/UDP csum offload, VLAN)
 * from the skb. For encapsulated csum offload the inner L4 protocol decides
 * TCPCS vs UDPCS. CRC is always requested.
 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530810static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
	811				       struct sk_buff *skb,
	812				       struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530814	u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700815
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000816	if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530817		BE_WRB_F_SET(wrb_params->features, LSO, 1);
	818		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000819		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530820			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530822		if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530823			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530824			proto = skb_inner_ip_proto(skb);
	825		} else {
	826			proto = skb_ip_proto(skb);
	827		}
	828		if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530829			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530830		else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530831			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832	}
	833
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100834	if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530835		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
	836		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700837	}
	838
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530839	BE_WRB_F_SET(wrb_params->features, CRC, 1);
	840}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500841
/* Translate the gathered wrb_params into the header WRB bit-fields:
 * checksum/LSO flags, VLAN tag, total WRB count and frame length.
 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530842static void wrb_fill_hdr(struct be_adapter *adapter,
	843			 struct be_eth_hdr_wrb *hdr,
	844			 struct be_wrb_params *wrb_params,
	845			 struct sk_buff *skb)
	846{
	847	memset(hdr, 0, sizeof(*hdr));
	848
	849	SET_TX_WRB_HDR_BITS(crc, hdr,
	850			    BE_WRB_F_GET(wrb_params->features, CRC));
	851	SET_TX_WRB_HDR_BITS(ipcs, hdr,
	852			    BE_WRB_F_GET(wrb_params->features, IPCS));
	853	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
	854			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	855	SET_TX_WRB_HDR_BITS(udpcs, hdr,
	856			    BE_WRB_F_GET(wrb_params->features, UDPCS));
	857
	858	SET_TX_WRB_HDR_BITS(lso, hdr,
	859			    BE_WRB_F_GET(wrb_params->features, LSO));
	860	SET_TX_WRB_HDR_BITS(lso6, hdr,
	861			    BE_WRB_F_GET(wrb_params->features, LSO6));
	862	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
	863
	864	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	865	 * hack is not needed, the evt bit is set while ringing DB.
	866	 */
	867	SET_TX_WRB_HDR_BITS(event, hdr,
	868			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	869	SET_TX_WRB_HDR_BITS(vlan, hdr,
	870			    BE_WRB_F_GET(wrb_params->features, VLAN));
	871	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
	872
	873	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	874	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Venkata Duvvuru760c2952015-05-13 13:00:14 +0530875	SET_TX_WRB_HDR_BITS(mgmt, hdr,
	876			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877}
878
/* DMA-unmap one fragment WRB. The 64-bit bus address is reassembled from the
 * two little-endian halves; unmap_single selects dma_unmap_single (headlen
 * buffer) vs dma_unmap_page (paged frag). A zero frag_len (dummy WRB) is a
 * no-op. NOTE(review): stray double blank line below — cosmetic only.
 */
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000879static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530880			  bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000881{
	882	dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500883	u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000884
Sathya Perla7101e112010-03-22 20:41:12 +0000885
Sathya Perlaf986afc2015-02-06 08:18:43 -0500886	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
	887		(u64)le32_to_cpu(wrb->frag_pa_lo);
	888	if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000889		if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500890			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000891		else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500892			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000893	}
	894}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700895
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530896/* Grab a WRB header for xmit */
/* Reserves the slot at the current producer index and advances the head;
 * the header contents are written later by be_tx_setup_wrb_hdr().
 */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530897static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700898{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530899	u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700900
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530901	queue_head_inc(&txo->q);
	902	return head;
	903}
	904
	905/* Set up the WRB header for xmit */
/* Fills the previously reserved header slot (index 'head'), records the skb
 * for completion processing, and bumps the queue's used/pending WRB counts.
 */
	906static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
	907				struct be_tx_obj *txo,
	908				struct be_wrb_params *wrb_params,
	909				struct sk_buff *skb, u16 head)
	910{
	911	u32 num_frags = skb_wrb_cnt(skb);
	912	struct be_queue_info *txq = &txo->q;
	913	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
	914
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530915	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500916	be_dws_cpu_to_le(hdr, sizeof(*hdr));
	917
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500918	BUG_ON(txo->sent_skb_list[head]);
	919	txo->sent_skb_list[head] = skb;
	920	txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530921	atomic_add(num_frags, &txq->used);
	922	txo->last_req_wrb_cnt = num_frags;
	923	txo->pend_wrb_cnt += num_frags;
	924}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700925
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530926/* Setup a WRB fragment (buffer descriptor) for xmit */
/* Writes one data-buffer WRB at the producer index and advances the head. */
	927static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
	928				 int len)
	929{
	930	struct be_eth_wrb *wrb;
	931	struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700932
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530933	wrb = queue_head_node(txq);
	934	wrb_fill(wrb, busaddr, len);
	935	queue_head_inc(txq);
	936}
937
938/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
939 * was invoked. The producer index is restored to the previous packet and the
940 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
941 */
942static void be_xmit_restore(struct be_adapter *adapter,
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530943 struct be_tx_obj *txo, u32 head, bool map_single,
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530944 u32 copied)
945{
946 struct device *dev;
947 struct be_eth_wrb *wrb;
948 struct be_queue_info *txq = &txo->q;
949
950 dev = &adapter->pdev->dev;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500951 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530952
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500953 /* skip the first wrb (hdr); it's not mapped */
954 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000955 while (copied) {
956 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000957 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000958 map_single = false;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500959 copied -= le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000960 queue_head_inc(txq);
961 }
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530962
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500963 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530964}
965
966/* Enqueue the given packet for transmit. This routine allocates WRBs for the
967 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
968 * of WRBs used up by the packet.
969 */
970static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
971 struct sk_buff *skb,
972 struct be_wrb_params *wrb_params)
973{
974 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
975 struct device *dev = &adapter->pdev->dev;
976 struct be_queue_info *txq = &txo->q;
977 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530978 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530979 dma_addr_t busaddr;
980 int len;
981
982 head = be_tx_get_wrb_hdr(txo);
983
984 if (skb->len > skb->data_len) {
985 len = skb_headlen(skb);
986
987 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
988 if (dma_mapping_error(dev, busaddr))
989 goto dma_err;
990 map_single = true;
991 be_tx_setup_wrb_frag(txo, busaddr, len);
992 copied += len;
993 }
994
995 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
996 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
997 len = skb_frag_size(frag);
998
999 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1000 if (dma_mapping_error(dev, busaddr))
1001 goto dma_err;
1002 be_tx_setup_wrb_frag(txo, busaddr, len);
1003 copied += len;
1004 }
1005
1006 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1007
1008 be_tx_stats_update(txo, skb);
1009 return wrb_cnt;
1010
1011dma_err:
1012 adapter->drv_stats.dma_map_errors++;
1013 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001014 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001015}
1016
/* True once the FW's QnQ async event has been received by this adapter. */
Sathya Perlaf7062ee2015-02-06 08:18:35 -05001017static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
	1018{
	1019	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
	1020}
1021
/* Insert VLAN tag(s) directly into the packet payload instead of relying on
 * HW tagging: the skb's tag (or pvid when QnQ event was seen and no tag is
 * present), then the outer QnQ vlan if configured. Sets VLAN_SKIP_HW in
 * wrb_params (when supplied) so the HW does not tag again. May return NULL
 * if skb_share_check()/vlan insertion fails; the original skb is consumed.
 */
Somnath Kotur93040ae2012-06-26 22:32:10 +00001022static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001023					     struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301024					     struct be_wrb_params
	1025					     *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +00001026{
	1027	u16 vlan_tag = 0;
	1028
	1029	skb = skb_share_check(skb, GFP_ATOMIC);
	1030	if (unlikely(!skb))
	1031		return skb;
	1032
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001033	if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +00001034		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301035
	1036	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
	1037		if (!vlan_tag)
	1038			vlan_tag = adapter->pvid;
	1039		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
	1040		 * skip VLAN insertion
	1041		 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301042		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301043	}
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001044
	1045	if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001046		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
	1047						vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001048		if (unlikely(!skb))
	1049			return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001050		skb->vlan_tci = 0;
	1051	}
	1052
	1053	/* Insert the outer VLAN, if any */
	1054	if (adapter->qnq_vid) {
	1055		vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001056		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
	1057						vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001058		if (unlikely(!skb))
	1059			return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301060		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001061	}
	1062
Somnath Kotur93040ae2012-06-26 22:32:10 +00001063	return skb;
	1064}
1065
/* Detect the offending IPv6 frame shape for the BE3 HW-VLAN-tagging erratum:
 * an IPv6 packet whose first extension header (non TCP/UDP next-header) has
 * hdrlen == 0xff.
 */
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001066static bool be_ipv6_exthdr_check(struct sk_buff *skb)
	1067{
	1068	struct ethhdr *eh = (struct ethhdr *)skb->data;
	1069	u16 offset = ETH_HLEN;
	1070
	1071	if (eh->h_proto == htons(ETH_P_IPV6)) {
	1072		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
	1073
	1074		offset += sizeof(struct ipv6hdr);
	1075		if (ip6h->nexthdr != NEXTHDR_TCP &&
	1076		    ip6h->nexthdr != NEXTHDR_UDP) {
	1077			struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301078				(struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001079
	1080			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
	1081			if (ehdr->hdrlen == 0xff)
	1082				return true;
	1083		}
	1084	}
	1085	return false;
	1086}
	1087
/* True when a VLAN tag may end up on this frame (skb tag, pvid or QnQ). */
	1088static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
	1089{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001090	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001091}
	1092
/* BE3-only TX stall check for the IPv6 extension-header erratum above. */
Sathya Perla748b5392014-05-09 13:29:13 +05301093static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001094{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001095	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001096}
1097
/* Apply Lancer/BEx HW-erratum workarounds before xmit: trim padded frames
 * to the IP tot_len (bad csum / tot_len rewrite bugs), skip HW VLAN tagging
 * when the tag is already inline, and insert the VLAN in SW where HW tagging
 * would corrupt the frame or stall the ASIC. Returns NULL (skb consumed or
 * dropped) on failure.
 */
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301098static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
	1099						  struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301100						  struct be_wrb_params
	1101						  *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001103	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001104	unsigned int eth_hdr_len;
	1105	struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001106
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001107	/* For padded packets, BE HW modifies tot_len field in IP header
	1108	 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001109	 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001110	 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001111	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
	1112						VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001113	if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001114	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001115	    is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001116		ip = (struct iphdr *)ip_hdr(skb);
	1117		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	1118	}
	1119
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001120	/* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301121	 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001122	 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301123	if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001124	    veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301125		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001126
Somnath Kotur93040ae2012-06-26 22:32:10 +00001127	/* HW has a bug wherein it will calculate CSUM for VLAN
	1128	 * pkts even though it is disabled.
	1129	 * Manually insert VLAN in pkt.
	1130	 */
	1131	if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001132	    skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301133		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001134		if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301135			goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001136	}
	1137
	1138	/* HW may lockup when VLAN HW tagging is requested on
	1139	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	1140	 * skip HW tagging is not enabled by FW.
	1141	 */
	1142	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301143		     (adapter->pvid || adapter->qnq_vid) &&
	1144		     !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001145		goto tx_drop;
	1146
	1147	/* Manual VLAN tag insertion to prevent:
	1148	 * ASIC lockup when the ASIC inserts VLAN tag into
	1149	 * certain ipv6 packets. Insert VLAN tags in driver,
	1150	 * and set event, completion, vlan bits accordingly
	1151	 * in the Tx WRB.
	1152	 */
	1153	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	1154	    be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301155		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001156		if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301157			goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001158	}
	1159
Sathya Perlaee9c7992013-05-22 23:04:55 +00001160	return skb;
	1161tx_drop:
	1162	dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301163err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001164	return NULL;
	1165}
1166
/* Top-level xmit workaround dispatcher: pad tiny (<= 32B) frames to 36B to
 * avoid a TX stall, apply the Lancer/BEx-specific fixes, and trim any skb
 * exceeding the HW's maximum supported length. Returns NULL if the skb was
 * consumed/dropped.
 */
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301167static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
	1168					   struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301169					   struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301170{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301171	int err;
	1172
Suresh Reddy8227e992015-10-12 03:47:19 -04001173	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	1174	 * packets that are 32b or less may cause a transmit stall
	1175	 * on that port. The workaround is to pad such packets
	1176	 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301177	 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001178	if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001179		if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301180			return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301181	}
	1182
	1183	if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301184		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301185		if (!skb)
	1186			return NULL;
	1187	}
	1188
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301189	/* The stack can send us skbs with length greater than
	1190	 * what the HW can handle. Trim the extra bytes.
	1191	 */
	1192	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	1193	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	1194	WARN_ON(err);
	1195
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301196	return skb;
	1197}
1198
/* Notify HW of all pending WRBs (xmit_more batching flush). Ensures the last
 * request is eventable, pads non-Lancer chips to an even WRB count with a
 * dummy WRB (adjusting num_wrb in the last header), rings the doorbell and
 * resets the pending count.
 */
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001199static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
	1200{
	1201	struct be_queue_info *txq = &txo->q;
	1202	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
	1203
	1204	/* Mark the last request eventable if it hasn't been marked already */
	1205	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
	1206		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
	1207
	1208	/* compose a dummy wrb if there are odd set of wrbs to notify */
	1209	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001210		wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001211		queue_head_inc(txq);
	1212		atomic_inc(&txq->used);
	1213		txo->pend_wrb_cnt++;
	1214		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
	1215					   TX_HDR_WRB_NUM_SHIFT);
	1216		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
	1217					  TX_HDR_WRB_NUM_SHIFT);
	1218	}
	1219	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	1220	txo->pend_wrb_cnt = 0;
	1221}
1222
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301223/* OS2BMC related */
/* Well-known UDP ports and filter-predicate macros used by
 * be_send_pkt_to_bmc() to decide whether a TX frame must also be forwarded
 * to the on-board BMC. Each is_*_filt_enabled() macro tests a bit in
 * adapter->bmc_filt_mask as configured by FW.
 */
	1224
	1225#define DHCP_CLIENT_PORT	68
	1226#define DHCP_SERVER_PORT	67
	1227#define NET_BIOS_PORT1	137
	1228#define NET_BIOS_PORT2	138
	1229#define DHCPV6_RAS_PORT	547
	1230
	1231#define is_mc_allowed_on_bmc(adapter, eh)	\
	1232	(!is_multicast_filt_enabled(adapter) &&	\
	1233	 is_multicast_ether_addr(eh->h_dest) &&	\
	1234	 !is_broadcast_ether_addr(eh->h_dest))
	1235
	1236#define is_bc_allowed_on_bmc(adapter, eh)	\
	1237	(!is_broadcast_filt_enabled(adapter) &&	\
	1238	 is_broadcast_ether_addr(eh->h_dest))
	1239
	1240#define is_arp_allowed_on_bmc(adapter, skb)	\
	1241	(is_arp(skb) && is_arp_filt_enabled(adapter))
	1242
	1243#define is_broadcast_packet(eh, adapter)	\
	1244		(is_multicast_ether_addr(eh->h_dest) && \
	1245		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
	1246
	1247#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
	1248
	1249#define is_arp_filt_enabled(adapter)	\
	1250		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
	1251
	1252#define is_dhcp_client_filt_enabled(adapter)	\
	1253		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
	1254
	1255#define is_dhcp_srvr_filt_enabled(adapter)	\
	1256		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
	1257
	1258#define is_nbios_filt_enabled(adapter)	\
	1259		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
	1260
	1261#define is_ipv6_na_filt_enabled(adapter)	\
	1262		(adapter->bmc_filt_mask &	\
	1263			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
	1264
	1265#define is_ipv6_ra_filt_enabled(adapter)	\
	1266		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
	1267
	1268#define is_ipv6_ras_filt_enabled(adapter)	\
	1269		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
	1270
	1271#define is_broadcast_filt_enabled(adapter)	\
	1272		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
	1273
	1274#define is_multicast_filt_enabled(adapter)	\
	1275		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1276
1277static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1278 struct sk_buff **skb)
1279{
1280 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1281 bool os2bmc = false;
1282
1283 if (!be_is_os2bmc_enabled(adapter))
1284 goto done;
1285
1286 if (!is_multicast_ether_addr(eh->h_dest))
1287 goto done;
1288
1289 if (is_mc_allowed_on_bmc(adapter, eh) ||
1290 is_bc_allowed_on_bmc(adapter, eh) ||
1291 is_arp_allowed_on_bmc(adapter, (*skb))) {
1292 os2bmc = true;
1293 goto done;
1294 }
1295
1296 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1297 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1298 u8 nexthdr = hdr->nexthdr;
1299
1300 if (nexthdr == IPPROTO_ICMPV6) {
1301 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1302
1303 switch (icmp6->icmp6_type) {
1304 case NDISC_ROUTER_ADVERTISEMENT:
1305 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1306 goto done;
1307 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1308 os2bmc = is_ipv6_na_filt_enabled(adapter);
1309 goto done;
1310 default:
1311 break;
1312 }
1313 }
1314 }
1315
1316 if (is_udp_pkt((*skb))) {
1317 struct udphdr *udp = udp_hdr((*skb));
1318
Venkat Duvvuru1645d992015-07-10 05:32:47 -04001319 switch (ntohs(udp->dest)) {
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301320 case DHCP_CLIENT_PORT:
1321 os2bmc = is_dhcp_client_filt_enabled(adapter);
1322 goto done;
1323 case DHCP_SERVER_PORT:
1324 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1325 goto done;
1326 case NET_BIOS_PORT1:
1327 case NET_BIOS_PORT2:
1328 os2bmc = is_nbios_filt_enabled(adapter);
1329 goto done;
1330 case DHCPV6_RAS_PORT:
1331 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1332 goto done;
1333 default:
1334 break;
1335 }
1336 }
1337done:
1338 /* For packets over a vlan, which are destined
1339 * to BMC, asic expects the vlan to be inline in the packet.
1340 */
1341 if (os2bmc)
1342 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1343
1344 return os2bmc;
1345}
1346
Sathya Perlaee9c7992013-05-22 23:04:55 +00001347static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1348{
1349 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001350 u16 q_idx = skb_get_queue_mapping(skb);
1351 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301352 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301353 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001354 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001355
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301356 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001357 if (unlikely(!skb))
1358 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001359
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301360 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1361
1362 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001363 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001364 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001365 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001367
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301368 /* if os2bmc is enabled and if the pkt is destined to bmc,
1369 * enqueue the pkt a 2nd time with mgmt bit set.
1370 */
1371 if (be_send_pkt_to_bmc(adapter, &skb)) {
1372 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1373 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1374 if (unlikely(!wrb_cnt))
1375 goto drop;
1376 else
1377 skb_get(skb);
1378 }
1379
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301380 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001381 netif_stop_subqueue(netdev, q_idx);
1382 tx_stats(txo)->tx_stops++;
1383 }
1384
1385 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1386 be_xmit_flush(adapter, txo);
1387
1388 return NETDEV_TX_OK;
1389drop:
1390 tx_stats(txo)->tx_drv_drops++;
1391 /* Flush the already enqueued tx requests */
1392 if (flush && txo->pend_wrb_cnt)
1393 be_xmit_flush(adapter, txo);
1394
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001395 return NETDEV_TX_OK;
1396}
1397
1398static int be_change_mtu(struct net_device *netdev, int new_mtu)
1399{
1400 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301401 struct device *dev = &adapter->pdev->dev;
1402
1403 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1404 dev_info(dev, "MTU must be between %d and %d bytes\n",
1405 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 return -EINVAL;
1407 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301408
1409 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301410 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411 netdev->mtu = new_mtu;
1412 return 0;
1413}
1414
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001415static inline bool be_in_all_promisc(struct be_adapter *adapter)
1416{
1417 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1418 BE_IF_FLAGS_ALL_PROMISCUOUS;
1419}
1420
1421static int be_set_vlan_promisc(struct be_adapter *adapter)
1422{
1423 struct device *dev = &adapter->pdev->dev;
1424 int status;
1425
1426 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1427 return 0;
1428
1429 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1430 if (!status) {
1431 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1432 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1433 } else {
1434 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1435 }
1436 return status;
1437}
1438
1439static int be_clear_vlan_promisc(struct be_adapter *adapter)
1440{
1441 struct device *dev = &adapter->pdev->dev;
1442 int status;
1443
1444 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1445 if (!status) {
1446 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1447 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1448 }
1449 return status;
1450}
1451
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001453 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1454 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 */
Sathya Perla10329df2012-06-05 19:37:18 +00001456static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457{
Vasundhara Volam50762662014-09-12 17:39:14 +05301458 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001459 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301460 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001461 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001462
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001463 /* No need to change the VLAN state if the I/F is in promiscuous */
1464 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001465 return 0;
1466
Sathya Perla92bf14a2013-08-27 16:57:32 +05301467 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001468 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001469
Somnath Kotur841f60f2016-07-27 05:26:15 -04001470 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1471 status = be_clear_vlan_promisc(adapter);
1472 if (status)
1473 return status;
1474 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001475 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301476 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1477 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001478
Vasundhara Volam435452a2015-03-20 06:28:23 -04001479 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001480 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001481 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001482 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001483 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1484 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301485 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001486 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001488 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
Patrick McHardy80d5c362013-04-19 02:04:28 +00001491static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492{
1493 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001494 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495
Sathya Perlab7172412016-07-27 05:26:18 -04001496 mutex_lock(&adapter->rx_filter_lock);
1497
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001498 /* Packets with VID 0 are always received by Lancer by default */
1499 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001500 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301501
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301502 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001503 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001504
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301505 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301506 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001507
Sathya Perlab7172412016-07-27 05:26:18 -04001508 status = be_vid_config(adapter);
1509done:
1510 mutex_unlock(&adapter->rx_filter_lock);
1511 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512}
1513
Patrick McHardy80d5c362013-04-19 02:04:28 +00001514static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515{
1516 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001517 int status = 0;
1518
1519 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001521 /* Packets with VID 0 are always received by Lancer by default */
1522 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001523 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001524
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301525 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001526 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301527
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301528 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301529 adapter->vlans_added--;
1530
Sathya Perlab7172412016-07-27 05:26:18 -04001531 status = be_vid_config(adapter);
1532done:
1533 mutex_unlock(&adapter->rx_filter_lock);
1534 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535}
1536
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001537static void be_set_all_promisc(struct be_adapter *adapter)
1538{
1539 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1540 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1541}
1542
1543static void be_set_mc_promisc(struct be_adapter *adapter)
1544{
1545 int status;
1546
1547 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1548 return;
1549
1550 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1551 if (!status)
1552 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1553}
1554
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001555static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001556{
1557 int status;
1558
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001559 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1560 return;
1561
1562 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001563 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001564 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1565}
1566
1567static void be_clear_uc_promisc(struct be_adapter *adapter)
1568{
1569 int status;
1570
1571 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1572 return;
1573
1574 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1575 if (!status)
1576 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1577}
1578
1579/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1580 * We use a single callback function for both sync and unsync. We really don't
1581 * add/remove addresses through this callback. But, we use it to detect changes
1582 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1583 */
/* __dev_uc_sync() callback: only flags that the uc-list changed; the list
 * itself is cached and programmed later in be_set_uc_list().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}
1592
/* __dev_mc_sync() callback: only flags that the mc-list changed; the list
 * itself is cached and programmed later in be_set_mc_list().
 */
static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}
1601
/* Sync the netdev multicast list to HW. Decides under the netdev address
 * lock whether to (a) do nothing (IFF_PROMISC), (b) fall back to
 * mc-promisc (IFF_ALLMULTI or list larger than HW supports), or
 * (c) cache the list into adapter->mc_list and program it via the
 * BE_IF_FLAGS_MULTICAST rx-filter command, falling back to mc-promisc on
 * FW failure.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	/* Hold the addr lock while walking/caching the netdev mc list */
	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	/* FW commands are issued outside the addr lock */
	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1652
/* Tear down multicast filtering: unsync the netdev mc-list, disable the
 * HW multicast filter and forget the cached count.
 */
static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}
1661
Suresh Reddy988d44b2016-09-07 19:57:52 +05301662static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1663{
1664 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1665 adapter->netdev->dev_addr)) {
1666 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1667 return 0;
1668 }
1669
1670 return be_cmd_pmac_add(adapter,
1671 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1672 adapter->if_handle,
1673 &adapter->pmac_id[uc_idx + 1], 0);
1674}
1675
/* Remove a unicast MAC filter from HW, unless pmac_id aliases the primary
 * MAC's slot (pmac_id[0]), which must never be deleted here.
 */
static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
	if (pmac_id == adapter->pmac_id[0])
		return;

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
1683
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001684static void be_set_uc_list(struct be_adapter *adapter)
1685{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001686 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001687 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001688 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001689 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001690
Sathya Perlab7172412016-07-27 05:26:18 -04001691 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001692 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001693
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001694 if (netdev->flags & IFF_PROMISC) {
1695 adapter->update_uc_list = false;
1696 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1697 uc_promisc = true;
1698 adapter->update_uc_list = false;
1699 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1700 /* Update uc-list unconditionally if the iface was previously
1701 * in uc-promisc mode and now is out of that mode.
1702 */
1703 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001704 }
1705
Sathya Perlab7172412016-07-27 05:26:18 -04001706 if (adapter->update_uc_list) {
1707 i = 1; /* First slot is claimed by the Primary MAC */
1708
1709 /* cache the uc-list in adapter array */
1710 netdev_for_each_uc_addr(ha, netdev) {
1711 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1712 i++;
1713 }
1714 curr_uc_macs = netdev_uc_count(netdev);
1715 }
1716 netif_addr_unlock_bh(netdev);
1717
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001718 if (uc_promisc) {
1719 be_set_uc_promisc(adapter);
1720 } else if (adapter->update_uc_list) {
1721 be_clear_uc_promisc(adapter);
1722
Sathya Perlab7172412016-07-27 05:26:18 -04001723 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301724 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001725
Sathya Perlab7172412016-07-27 05:26:18 -04001726 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301727 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001728 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001729 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001730 }
1731}
1732
/* Tear down unicast filtering: unsync the netdev uc-list and delete every
 * programmed per-uc MAC filter (pmac slot 0, the primary MAC, is skipped
 * inside be_uc_mac_del()).
 */
static void be_clear_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	__dev_uc_unsync(netdev, NULL);
	for (i = 0; i < adapter->uc_macs; i++)
		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

	adapter->uc_macs = 0;
}
1744
Sathya Perlab7172412016-07-27 05:26:18 -04001745static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001746{
Sathya Perlab7172412016-07-27 05:26:18 -04001747 struct net_device *netdev = adapter->netdev;
1748
1749 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750
1751 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001752 if (!be_in_all_promisc(adapter))
1753 be_set_all_promisc(adapter);
1754 } else if (be_in_all_promisc(adapter)) {
1755 /* We need to re-program the vlan-list or clear
1756 * vlan-promisc mode (if needed) when the interface
1757 * comes out of promisc mode.
1758 */
1759 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001760 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001761
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001762 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001763 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001764
1765 mutex_unlock(&adapter->rx_filter_lock);
1766}
1767
1768static void be_work_set_rx_mode(struct work_struct *work)
1769{
1770 struct be_cmd_work *cmd_work =
1771 container_of(work, struct be_cmd_work, work);
1772
1773 __be_set_rx_mode(cmd_work->adapter);
1774 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001775}
1776
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001777static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1778{
1779 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001780 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001781 int status;
1782
Sathya Perla11ac75e2011-12-13 00:58:50 +00001783 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001784 return -EPERM;
1785
Sathya Perla11ac75e2011-12-13 00:58:50 +00001786 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001787 return -EINVAL;
1788
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301789 /* Proceed further only if user provided MAC is different
1790 * from active MAC
1791 */
1792 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1793 return 0;
1794
Sathya Perla3175d8c2013-07-23 15:25:03 +05301795 if (BEx_chip(adapter)) {
1796 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1797 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001798
Sathya Perla11ac75e2011-12-13 00:58:50 +00001799 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1800 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301801 } else {
1802 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1803 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001804 }
1805
Kalesh APabccf232014-07-17 16:20:24 +05301806 if (status) {
1807 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1808 mac, vf, status);
1809 return be_cmd_status(status);
1810 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001811
Kalesh APabccf232014-07-17 16:20:24 +05301812 ether_addr_copy(vf_cfg->mac_addr, mac);
1813
1814 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001815}
1816
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001817static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301818 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001819{
1820 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001821 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001822
Sathya Perla11ac75e2011-12-13 00:58:50 +00001823 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001824 return -EPERM;
1825
Sathya Perla11ac75e2011-12-13 00:58:50 +00001826 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001827 return -EINVAL;
1828
1829 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001830 vi->max_tx_rate = vf_cfg->tx_rate;
1831 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001832 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1833 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001834 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301835 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001836 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001837
1838 return 0;
1839}
1840
Vasundhara Volam435452a2015-03-20 06:28:23 -04001841static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1842{
1843 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1844 u16 vids[BE_NUM_VLANS_SUPPORTED];
1845 int vf_if_id = vf_cfg->if_handle;
1846 int status;
1847
1848 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001849 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001850 if (status)
1851 return status;
1852
1853 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1854 vids[0] = 0;
1855 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1856 if (!status)
1857 dev_info(&adapter->pdev->dev,
1858 "Cleared guest VLANs on VF%d", vf);
1859
1860 /* After TVT is enabled, disallow VFs to program VLAN filters */
1861 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1862 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1863 ~BE_PRIV_FILTMGMT, vf + 1);
1864 if (!status)
1865 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1866 }
1867 return 0;
1868}
1869
/* Disable Transparent VLAN Tagging on VF 'vf': reset the hypervisor-switch
 * VLAN tag and restore the VF's FILTMGMT privilege so the guest can manage
 * its own VLAN filters again.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1896
Sathya Perla748b5392014-05-09 13:29:13 +05301897static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001898{
1899 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001900 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001901 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001902
Sathya Perla11ac75e2011-12-13 00:58:50 +00001903 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001904 return -EPERM;
1905
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001906 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001907 return -EINVAL;
1908
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001909 if (vlan || qos) {
1910 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001911 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001912 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001913 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001914 }
1915
Kalesh APabccf232014-07-17 16:20:24 +05301916 if (status) {
1917 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001918 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1919 status);
Kalesh APabccf232014-07-17 16:20:24 +05301920 return be_cmd_status(status);
1921 }
1922
1923 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301924 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001925}
1926
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001927static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1928 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001929{
1930 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301931 struct device *dev = &adapter->pdev->dev;
1932 int percent_rate, status = 0;
1933 u16 link_speed = 0;
1934 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001935
Sathya Perla11ac75e2011-12-13 00:58:50 +00001936 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001937 return -EPERM;
1938
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001939 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001940 return -EINVAL;
1941
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001942 if (min_tx_rate)
1943 return -EINVAL;
1944
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301945 if (!max_tx_rate)
1946 goto config_qos;
1947
1948 status = be_cmd_link_status_query(adapter, &link_speed,
1949 &link_status, 0);
1950 if (status)
1951 goto err;
1952
1953 if (!link_status) {
1954 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301955 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301956 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001957 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001958
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301959 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1960 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1961 link_speed);
1962 status = -EINVAL;
1963 goto err;
1964 }
1965
1966 /* On Skyhawk the QOS setting must be done only as a % value */
1967 percent_rate = link_speed / 100;
1968 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1969 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1970 percent_rate);
1971 status = -EINVAL;
1972 goto err;
1973 }
1974
1975config_qos:
1976 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001977 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301978 goto err;
1979
1980 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1981 return 0;
1982
1983err:
1984 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1985 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05301986 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001987}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301988
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301989static int be_set_vf_link_state(struct net_device *netdev, int vf,
1990 int link_state)
1991{
1992 struct be_adapter *adapter = netdev_priv(netdev);
1993 int status;
1994
1995 if (!sriov_enabled(adapter))
1996 return -EPERM;
1997
1998 if (vf >= adapter->num_vfs)
1999 return -EINVAL;
2000
2001 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302002 if (status) {
2003 dev_err(&adapter->pdev->dev,
2004 "Link state change on VF %d failed: %#x\n", vf, status);
2005 return be_cmd_status(status);
2006 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302007
Kalesh APabccf232014-07-17 16:20:24 +05302008 adapter->vf_cfg[vf].plink_tracking = link_state;
2009
2010 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302011}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002012
Kalesh APe7bcbd72015-05-06 05:30:32 -04002013static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2014{
2015 struct be_adapter *adapter = netdev_priv(netdev);
2016 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2017 u8 spoofchk;
2018 int status;
2019
2020 if (!sriov_enabled(adapter))
2021 return -EPERM;
2022
2023 if (vf >= adapter->num_vfs)
2024 return -EINVAL;
2025
2026 if (BEx_chip(adapter))
2027 return -EOPNOTSUPP;
2028
2029 if (enable == vf_cfg->spoofchk)
2030 return 0;
2031
2032 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2033
2034 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2035 0, spoofchk);
2036 if (status) {
2037 dev_err(&adapter->pdev->dev,
2038 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2039 return be_cmd_status(status);
2040 }
2041
2042 vf_cfg->spoofchk = enable;
2043 return 0;
2044}
2045
Sathya Perla2632baf2013-10-01 16:00:00 +05302046static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2047 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002048{
Sathya Perla2632baf2013-10-01 16:00:00 +05302049 aic->rx_pkts_prev = rx_pkts;
2050 aic->tx_reqs_prev = tx_pkts;
2051 aic->jiffies = now;
2052}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002053
/* Compute a new event-queue delay (interrupt coalescing value) for @eqo
 * based on the aggregate rx/tx packet rate of all queues served by it.
 * Returns the static (ethtool-set) delay when adaptive mode is off, the
 * previous delay when a rate can't be computed, else a clamped new delay.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive coalescing off: reset the timestamp so the first
		 * sample after re-enable is treated as a fresh baseline, and
		 * use the ethtool-configured delay.
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counters over all rx/tx queues on this EQ; the
	 * u64_stats fetch/retry loop gives a consistent 64-bit read.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	/* Avoid divide-by-zero when called twice within the same msec */
	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets-per-second over the sampling interval */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Below ~30K pps: no coalescing; otherwise clamp to [min, max] */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2114
2115/* For Skyhawk-R only */
2116static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2117{
2118 struct be_adapter *adapter = eqo->adapter;
2119 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2120 ulong now = jiffies;
2121 int eqd;
2122 u32 mult_enc;
2123
2124 if (!aic->enable)
2125 return 0;
2126
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302127 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002128 eqd = aic->prev_eqd;
2129 else
2130 eqd = be_get_new_eqd(eqo);
2131
2132 if (eqd > 100)
2133 mult_enc = R2I_DLY_ENC_1;
2134 else if (eqd > 60)
2135 mult_enc = R2I_DLY_ENC_2;
2136 else if (eqd > 20)
2137 mult_enc = R2I_DLY_ENC_3;
2138 else
2139 mult_enc = R2I_DLY_ENC_0;
2140
2141 aic->prev_eqd = eqd;
2142
2143 return mult_enc;
2144}
2145
/* Recompute the EQ delay for every event queue and push any changed
 * values to the FW in a single MODIFY_EQD command.  @force_update sends
 * the delay even when it hasn't changed since the last call.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* 65/100 scaling converts the delay to the FW's
			 * multiplier units — presumably per the EQ spec;
			 * confirm against be_cmds.
			 */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Issue the FW command only if at least one EQ changed */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
2167
/* Account one parsed Rx completion @rxcp into this rx-object's stats.
 * All updates happen inside one u64_stats critical section so readers
 * see a consistent snapshot.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	/* Completion flagged as a tunneled (VXLAN-offloaded) frame */
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
2185
Sathya Perla2e588f82011-03-11 02:49:26 +00002186static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002187{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002188 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302189 * Also ignore ipcksm for ipv6 pkts
2190 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002191 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302192 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002193}
2194
/* Consume the Rx buffer at the RXQ tail and return its page_info.
 * For the last fragment carved out of a big page the whole page is DMA
 * unmapped; for intermediate fragments only a CPU sync of that fragment
 * is done (the mapping stays live for the remaining fragments).
 * Advances the tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted buffer must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Final frag of this big page: tear down the mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Make just this fragment's data visible to the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2220
2221/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002222static void be_rx_compl_discard(struct be_rx_obj *rxo,
2223 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002224{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002226 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002228 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302229 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002230 put_page(page_info->page);
2231 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 }
2233}
2234
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  The first fragment is either copied entirely into
 * the linear area (tiny packets) or split: the Ethernet header goes
 * into the linear area and the payload stays in the page as frag 0.
 * Remaining fragments are attached as page frags, coalescing frags that
 * share a physical page into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment becomes page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: HW must have reported one frag */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * reference taken when the buffer was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2309
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted Rx buffers, set checksum /
 * hash / vlan metadata, and hand it to the stack.  On skb-allocation
 * failure the completion's buffers are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only if RXCSUM is on and the compl says ok */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Rx queue index = offset of this rxo in the adapter's array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled frames the validated csum refers to the inner pkt */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2345
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted Rx buffers as page frags of the napi GRO skb
 * (coalescing frags that share a physical page), set metadata, and
 * feed the frame into the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the whole completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is -1 so the first iteration starts frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is used only for frames whose checksum HW validated */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled frames the validated csum refers to the inner pkt */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2402
/* Decode a v1-format Rx completion descriptor into the driver's
 * chip-independent be_rx_compl_info representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* v1 completions additionally report tunneled (VXLAN) frames */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002425
/* Decode a v0-format Rx completion descriptor into the driver's
 * chip-independent be_rx_compl_info representation.  Unlike v1, v0
 * reports an ip_frag flag instead of tunneling info.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2447
/* Fetch the next valid Rx completion from the CQ tail, or NULL if none.
 * Parses the raw descriptor into rxo->rxcp, applies vlan fixups, clears
 * the descriptor's valid bit, and advances the CQ tail.  The rmb()
 * orders the valid-bit check before reading the rest of the entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after seeing valid != 0 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in host order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vlan is one
		 * the host explicitly configured
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2492
Eric Dumazet1829b082011-03-01 05:48:12 +00002493static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002494{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002495 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002496
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002498 gfp |= __GFP_COMP;
2499 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002500}
2501
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Posts up to @frags_needed fragments (stopping
 * early on allocation/mapping failure or when the RXQ slot at head is
 * still occupied) and rings the doorbell in MAX_NUM_POST_ERX_DB chunks.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and DMA-map it once; the
			 * following fragments reuse this mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the current big page: one page
			 * reference per posted fragment.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the Rx descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks the HW accepts per write */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2584
/* Fetch the next valid Tx completion from @txo's CQ, or NULL if none.
 * Extracts status and the last-wrb index into txo->txcp, clears the
 * descriptor's valid bit and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear valid so this entry isn't seen again next pass */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2605
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * every wrb and freeing the completed skbs.  A non-NULL sent_skbs[]
 * entry marks the header wrb of a request; its skb is freed when the
 * next request starts (or after the loop, for the final one).
 * Returns the number of wrbs consumed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* First data wrb after a hdr wrb maps the skb head */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2640
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002641/* Return the number of events in the event queue */
2642static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002643{
2644 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002646
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002647 do {
2648 eqe = queue_tail_node(&eqo->q);
2649 if (eqe->evt == 0)
2650 break;
2651
2652 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002653 eqe->evt = 0;
2654 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002655 queue_tail_inc(&eqo->q);
2656 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002657
2658 return num;
2659}
2660
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002661/* Leaves the EQ is disarmed state */
2662static void be_eq_clean(struct be_eq_obj *eqo)
2663{
2664 int num = events_get(eqo);
2665
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002666 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002667}
2668
/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	/* Drop the page reference taken when each buffer was posted and
	 * clear its tracking entry so the slot can be reused.
	 */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	/* Queue is empty now; reset the producer/consumer indices */
	rxq->tail = 0;
	rxq->head = 0;
}
2684
/* Drain the RX completion queue, discarding all completions, until the
 * HW flush completion arrives (or a timeout/error), then leave the CQ
 * unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2724
/* Drain TX completions from all TX queues during teardown, then free any
 * TX requests that were enqueued but never notified to HW and rewind the
 * TXQ indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW is still responding; restart timeout */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2791
/* Tear down all event queues: drain events, destroy the HW queue, detach
 * NAPI and free per-EQ resources.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if the HW queue was not created */
		be_queue_free(adapter, &eqo->q);
	}
}
2808
/* Allocate and create the event queues, set up adaptive interrupt
 * coalescing defaults, per-EQ IRQ affinity hints and NAPI contexts.
 * Returns 0 on success or a negative errno; on failure, partially
 * created EQs are cleaned up by the caller's teardown path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2849
/* Destroy the MCC queue and its completion queue (queue before CQ, the
 * reverse of creation order).
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2864
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue (on the default EQ) and the MCC queue
 * itself. Returns 0 on success or -1, unwinding via the goto chain.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2897
/* Destroy every TX queue and its completion queue and free their memory */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2916
/* Create the TX queues and their completion queues, bind each CQ to an EQ
 * and set the XPS affinity of each TXQ to its EQ's cpumask.
 * Returns 0 on success or a command/alloc error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2961
/* Destroy all RX completion queues and free their memory */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2975
/* Compute the number of RSS/RX queues and create an RX completion queue
 * for each, distributing them over the available EQs.
 * Returns 0 on success or an alloc/command error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are spread round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3017
/* INTx interrupt handler: schedules NAPI and tracks spurious interrupts
 * so that only the first one after a valid interrupt is acknowledged.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events without re-arming the EQ */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3049
/* MSI-x interrupt handler: event counting/clearing is left to be_poll() */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	/* Notify with 0 events and no re-arm; be_poll() drains the EQ */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
3058
Sathya Perla2e588f82011-03-11 02:49:26 +00003059static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003060{
Somnath Koture38b1702013-05-29 22:55:56 +00003061 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062}
3063
/* Process up to @budget RX completions from @rxo, discarding flush/partial
 * and mis-filtered completions, then notify the CQ and replenish RX buffers
 * if the ring is running low. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3123
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303124static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303125{
3126 switch (status) {
3127 case BE_TX_COMP_HDR_PARSE_ERR:
3128 tx_stats(txo)->tx_hdr_parse_err++;
3129 break;
3130 case BE_TX_COMP_NDMA_ERR:
3131 tx_stats(txo)->tx_dma_err++;
3132 break;
3133 case BE_TX_COMP_ACL_ERR:
3134 tx_stats(txo)->tx_spoof_check_err++;
3135 break;
3136 }
3137}
3138
/* Bump the TX error counter matching a Lancer-family completion status.
 * Unrecognized status codes are intentionally ignored.
 */
static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	/* Both HSW drop reasons are counted as spoof-check failures */
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}
3160
/* Reap TX completions for @txo (subqueue @idx): free/unmap the completed
 * WRBs, record per-chip error stats, notify the CQ and wake the netdev
 * subqueue if it was stopped for lack of WRBs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003195
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI/busy-poll arbitration helpers: eqo->lock + eqo->state ensure a
 * given EQ's RX rings are processed by either the NAPI path or the
 * busy-poll path, never both at once.
 */

/* Try to claim the EQ for NAPI processing; returns false (and records a
 * yield) if busy-poll currently owns it.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false (and records a
 * yield) if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the arbitration lock/state for an EQ */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll has released the EQ (used during teardown) */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support NAPI is the only consumer, so these helpers
 * degenerate to no-ops and be_lock_busy_poll() always fails.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3295
/* NAPI poll handler for an EQ: reaps TX completions, processes RX within
 * @budget (unless busy-poll owns the rings), services MCC on its EQ, and
 * either completes NAPI and re-arms the EQ or keeps polling.
 * Returns the RX work done (or @budget to stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the rings; claim the full budget so NAPI
		 * keeps rescheduling us.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3344
Sathya Perla6384a4d2013-10-25 10:40:16 +05303345#ifdef CONFIG_NET_RX_BUSY_POLL
3346static int be_busy_poll(struct napi_struct *napi)
3347{
3348 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3349 struct be_adapter *adapter = eqo->adapter;
3350 struct be_rx_obj *rxo;
3351 int i, work = 0;
3352
3353 if (!be_lock_busy_poll(eqo))
3354 return LL_FLUSH_BUSY;
3355
3356 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3357 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3358 if (work)
3359 break;
3360 }
3361
3362 be_unlock_busy_poll(eqo);
3363 return work;
3364}
3365#endif
3366
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003367void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003368{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003369 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3370 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003371 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303372 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003373
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303374 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003375 return;
3376
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003377 if (lancer_chip(adapter)) {
3378 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3379 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303380 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003381 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303382 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003383 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303384 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303385 /* Do not log error messages if its a FW reset */
3386 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3387 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3388 dev_info(dev, "Firmware update in progress\n");
3389 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303390 dev_err(dev, "Error detected in the card\n");
3391 dev_err(dev, "ERR: sliport status 0x%x\n",
3392 sliport_status);
3393 dev_err(dev, "ERR: sliport error1 0x%x\n",
3394 sliport_err1);
3395 dev_err(dev, "ERR: sliport error2 0x%x\n",
3396 sliport_err2);
3397 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003398 }
3399 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003400 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3401 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3402 ue_lo_mask = ioread32(adapter->pcicfg +
3403 PCICFG_UE_STATUS_LOW_MASK);
3404 ue_hi_mask = ioread32(adapter->pcicfg +
3405 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003406
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003407 ue_lo = (ue_lo & ~ue_lo_mask);
3408 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003409
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303410 /* On certain platforms BE hardware can indicate spurious UEs.
3411 * Allow HW to stop working completely in case of a real UE.
3412 * Hence not setting the hw_error for UE detection.
3413 */
3414
3415 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303416 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303417 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303418 be_set_error(adapter, BE_ERROR_UE);
3419
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303420 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3421 if (ue_lo & 1)
3422 dev_err(dev, "UE: %s bit set\n",
3423 ue_status_low_desc[i]);
3424 }
3425 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3426 if (ue_hi & 1)
3427 dev_err(dev, "UE: %s bit set\n",
3428 ue_status_hi_desc[i]);
3429 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303430 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003431 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003432}
3433
Sathya Perla8d56ff12009-11-22 22:02:26 +00003434static void be_msix_disable(struct be_adapter *adapter)
3435{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003436 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003437 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003438 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303439 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003440 }
3441}
3442
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003443static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003444{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003445 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003446 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003447 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003448
Sathya Perlace7faf02016-06-22 08:54:53 -04003449 /* If RoCE is supported, program the max number of vectors that
3450 * could be used for NIC and RoCE, else, just program the number
3451 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303452 */
Sathya Perlae2617682016-06-22 08:54:54 -04003453 if (be_roce_supported(adapter)) {
3454 max_roce_eqs =
3455 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3456 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3457 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3458 } else {
3459 num_vec = max(adapter->cfg_num_rx_irqs,
3460 adapter->cfg_num_tx_irqs);
3461 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003462
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003463 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003464 adapter->msix_entries[i].entry = i;
3465
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003466 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3467 MIN_MSIX_VECTORS, num_vec);
3468 if (num_vec < 0)
3469 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003470
Sathya Perla92bf14a2013-08-27 16:57:32 +05303471 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3472 adapter->num_msix_roce_vec = num_vec / 2;
3473 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3474 adapter->num_msix_roce_vec);
3475 }
3476
3477 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3478
3479 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3480 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003481 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003482
3483fail:
3484 dev_warn(dev, "MSIx enable failed\n");
3485
3486 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003487 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003488 return num_vec;
3489 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003490}
3491
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003492static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303493 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003494{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303495 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003496}
3497
3498static int be_msix_register(struct be_adapter *adapter)
3499{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003500 struct net_device *netdev = adapter->netdev;
3501 struct be_eq_obj *eqo;
3502 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003503
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003504 for_all_evt_queues(adapter, eqo, i) {
3505 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3506 vec = be_msix_vec_get(adapter, eqo);
3507 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003508 if (status)
3509 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003510
3511 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003512 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003513
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003514 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003515err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303516 for (i--; i >= 0; i--) {
3517 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003518 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303519 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003520 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303521 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003522 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003523 return status;
3524}
3525
/* Registers the adapter's interrupt handler(s): per-EQ MSI-x vectors
 * when MSI-x is enabled, otherwise a shared INTx line serviced by EQ0.
 * A PF falls back to INTx if MSI-x registration fails; a VF cannot.
 * Returns 0 on success or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3553
3554static void be_irq_unregister(struct be_adapter *adapter)
3555{
3556 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003557 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003558 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003559
3560 if (!adapter->isr_registered)
3561 return;
3562
3563 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003564 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003565 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003566 goto done;
3567 }
3568
3569 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003570 for_all_evt_queues(adapter, eqo, i) {
3571 vec = be_msix_vec_get(adapter, eqo);
3572 irq_set_affinity_hint(vec, NULL);
3573 free_irq(vec, eqo);
3574 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003575
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003576done:
3577 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003578}
3579
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003580static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003581{
Ajit Khaparde62219062016-02-10 22:45:53 +05303582 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003583 struct be_queue_info *q;
3584 struct be_rx_obj *rxo;
3585 int i;
3586
3587 for_all_rx_queues(adapter, rxo, i) {
3588 q = &rxo->q;
3589 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003590 /* If RXQs are destroyed while in an "out of buffer"
3591 * state, there is a possibility of an HW stall on
3592 * Lancer. So, post 64 buffers to each queue to relieve
3593 * the "out of buffer" condition.
3594 * Make sure there's space in the RXQ before posting.
3595 */
3596 if (lancer_chip(adapter)) {
3597 be_rx_cq_clean(rxo);
3598 if (atomic_read(&q->used) == 0)
3599 be_post_rx_frags(rxo, GFP_KERNEL,
3600 MAX_RX_POST);
3601 }
3602
Sathya Perla482c9e72011-06-29 23:33:17 +00003603 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003604 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003605 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003606 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003607 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003608 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303609
3610 if (rss->rss_flags) {
3611 rss->rss_flags = RSS_ENABLE_NONE;
3612 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3613 128, rss->rss_hkey);
3614 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003615}
3616
Kalesh APbcc84142015-08-05 03:27:48 -04003617static void be_disable_if_filters(struct be_adapter *adapter)
3618{
Suresh Reddy988d44b2016-09-07 19:57:52 +05303619 be_dev_mac_del(adapter, adapter->pmac_id[0]);
Kalesh APbcc84142015-08-05 03:27:48 -04003620 be_clear_uc_list(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04003621 be_clear_mc_list(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003622
3623 /* The IFACE flags are enabled in the open path and cleared
3624 * in the close path. When a VF gets detached from the host and
3625 * assigned to a VM the following happens:
3626 * - VF's IFACE flags get cleared in the detach path
3627 * - IFACE create is issued by the VF in the attach path
3628 * Due to a bug in the BE3/Skyhawk-R FW
3629 * (Lancer FW doesn't have the bug), the IFACE capability flags
3630 * specified along with the IFACE create cmd issued by a VF are not
3631 * honoured by FW. As a consequence, if a *new* driver
3632 * (that enables/disables IFACE flags in open/close)
3633 * is loaded in the host and an *old* driver is * used by a VM/VF,
3634 * the IFACE gets created *without* the needed flags.
3635 * To avoid this, disable RX-filter flags only for Lancer.
3636 */
3637 if (lancer_chip(adapter)) {
3638 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3639 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3640 }
3641}
3642
Sathya Perla889cd4b2010-05-30 23:33:45 +00003643static int be_close(struct net_device *netdev)
3644{
3645 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003646 struct be_eq_obj *eqo;
3647 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003648
Kalesh APe1ad8e32014-04-14 16:12:41 +05303649 /* This protection is needed as be_close() may be called even when the
3650 * adapter is in cleared state (after eeh perm failure)
3651 */
3652 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3653 return 0;
3654
Sathya Perlab7172412016-07-27 05:26:18 -04003655 /* Before attempting cleanup ensure all the pending cmds in the
3656 * config_wq have finished execution
3657 */
3658 flush_workqueue(be_wq);
3659
Kalesh APbcc84142015-08-05 03:27:48 -04003660 be_disable_if_filters(adapter);
3661
Ivan Veceradff345c52013-11-27 08:59:32 +01003662 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3663 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003664 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303665 be_disable_busy_poll(eqo);
3666 }
David S. Miller71237b62013-11-28 18:53:36 -05003667 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003668 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003669
3670 be_async_mcc_disable(adapter);
3671
3672 /* Wait for all pending tx completions to arrive so that
3673 * all tx skbs are freed.
3674 */
Sathya Perlafba87552013-05-08 02:05:50 +00003675 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303676 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003677
3678 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003679
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003680 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003681 if (msix_enabled(adapter))
3682 synchronize_irq(be_msix_vec_get(adapter, eqo));
3683 else
3684 synchronize_irq(netdev->irq);
3685 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003686 }
3687
Sathya Perla889cd4b2010-05-30 23:33:45 +00003688 be_irq_unregister(adapter);
3689
Sathya Perla482c9e72011-06-29 23:33:17 +00003690 return 0;
3691}
3692
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003693static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003694{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003695 struct rss_info *rss = &adapter->rss_info;
3696 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003697 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003698 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003699
3700 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003701 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3702 sizeof(struct be_eth_rx_d));
3703 if (rc)
3704 return rc;
3705 }
3706
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003707 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3708 rxo = default_rxo(adapter);
3709 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3710 rx_frag_size, adapter->if_handle,
3711 false, &rxo->rss_id);
3712 if (rc)
3713 return rc;
3714 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003715
3716 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003717 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003718 rx_frag_size, adapter->if_handle,
3719 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003720 if (rc)
3721 return rc;
3722 }
3723
3724 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003725 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003726 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303727 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003728 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303729 rss->rsstable[j + i] = rxo->rss_id;
3730 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003731 }
3732 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303733 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3734 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003735
3736 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303737 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3738 RSS_ENABLE_UDP_IPV6;
Ajit Khaparde62219062016-02-10 22:45:53 +05303739
3740 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3741 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3742 RSS_INDIR_TABLE_LEN, rss_key);
3743 if (rc) {
3744 rss->rss_flags = RSS_ENABLE_NONE;
3745 return rc;
3746 }
3747
3748 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303749 } else {
3750 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303751 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303752 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003753
Venkata Duvvurue2557872014-04-21 15:38:00 +05303754
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003755 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3756 * which is a queue empty condition
3757 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003758 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003759 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3760
Sathya Perla889cd4b2010-05-30 23:33:45 +00003761 return 0;
3762}
3763
Kalesh APbcc84142015-08-05 03:27:48 -04003764static int be_enable_if_filters(struct be_adapter *adapter)
3765{
3766 int status;
3767
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003768 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003769 if (status)
3770 return status;
3771
3772 /* For BE3 VFs, the PF programs the initial MAC address */
3773 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05303774 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003775 if (status)
3776 return status;
3777 }
3778
3779 if (adapter->vlans_added)
3780 be_vid_config(adapter);
3781
Sathya Perlab7172412016-07-27 05:26:18 -04003782 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003783
3784 return 0;
3785}
3786
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003787static int be_open(struct net_device *netdev)
3788{
3789 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003790 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003791 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003792 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003793 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003794 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003795
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003796 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003797 if (status)
3798 goto err;
3799
Kalesh APbcc84142015-08-05 03:27:48 -04003800 status = be_enable_if_filters(adapter);
3801 if (status)
3802 goto err;
3803
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003804 status = be_irq_register(adapter);
3805 if (status)
3806 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003807
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003808 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003809 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003810
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003811 for_all_tx_queues(adapter, txo, i)
3812 be_cq_notify(adapter, txo->cq.id, true, 0);
3813
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003814 be_async_mcc_enable(adapter);
3815
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003816 for_all_evt_queues(adapter, eqo, i) {
3817 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303818 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003819 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003820 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003821 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003822
Sathya Perla323ff712012-09-28 04:39:43 +00003823 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003824 if (!status)
3825 be_link_status_update(adapter, link_status);
3826
Sathya Perlafba87552013-05-08 02:05:50 +00003827 netif_tx_start_all_queues(netdev);
Sathya Perlac9c47142014-03-27 10:46:19 +05303828 if (skyhawk_chip(adapter))
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07003829 udp_tunnel_get_rx_info(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303830
Sathya Perla889cd4b2010-05-30 23:33:45 +00003831 return 0;
3832err:
3833 be_close(adapter->netdev);
3834 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003835}
3836
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003837static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3838{
3839 u32 addr;
3840
3841 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3842
3843 mac[5] = (u8)(addr & 0xFF);
3844 mac[4] = (u8)((addr >> 8) & 0xFF);
3845 mac[3] = (u8)((addr >> 16) & 0xFF);
3846 /* Use the OUI from the current MAC address */
3847 memcpy(mac, adapter->netdev->dev_addr, 3);
3848}
3849
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003850/*
3851 * Generate a seed MAC address from the PF MAC Address using jhash.
3852 * MAC Address for VFs are assigned incrementally starting from the seed.
3853 * These addresses are programmed in the ASIC by the PF and the VF driver
3854 * queries for the MAC address during its probe.
3855 */
Sathya Perla4c876612013-02-03 20:30:11 +00003856static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003857{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003858 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003859 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003860 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003861 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003862
3863 be_vf_eth_addr_generate(adapter, mac);
3864
Sathya Perla11ac75e2011-12-13 00:58:50 +00003865 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303866 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003867 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003868 vf_cfg->if_handle,
3869 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303870 else
3871 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3872 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003873
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003874 if (status)
3875 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303876 "Mac address assignment failed for VF %d\n",
3877 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003878 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003879 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003880
3881 mac[5] += 1;
3882 }
3883 return status;
3884}
3885
Sathya Perla4c876612013-02-03 20:30:11 +00003886static int be_vfs_mac_query(struct be_adapter *adapter)
3887{
3888 int status, vf;
3889 u8 mac[ETH_ALEN];
3890 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003891
3892 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303893 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3894 mac, vf_cfg->if_handle,
3895 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003896 if (status)
3897 return status;
3898 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3899 }
3900 return 0;
3901}
3902
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003903static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003904{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003905 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003906 u32 vf;
3907
Sathya Perla257a3fe2013-06-14 15:54:51 +05303908 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003909 dev_warn(&adapter->pdev->dev,
3910 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003911 goto done;
3912 }
3913
Sathya Perlab4c1df92013-05-08 02:05:47 +00003914 pci_disable_sriov(adapter->pdev);
3915
Sathya Perla11ac75e2011-12-13 00:58:50 +00003916 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303917 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003918 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3919 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303920 else
3921 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3922 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003923
Sathya Perla11ac75e2011-12-13 00:58:50 +00003924 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3925 }
Somnath Kotur884476b2016-06-22 08:54:55 -04003926
3927 if (BE3_chip(adapter))
3928 be_cmd_set_hsw_config(adapter, 0, 0,
3929 adapter->if_handle,
3930 PORT_FWD_TYPE_PASSTHRU, 0);
Sathya Perla39f1d942012-05-08 19:41:24 +00003931done:
3932 kfree(adapter->vf_cfg);
3933 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303934 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003935}
3936
Sathya Perla77071332013-08-27 16:57:34 +05303937static void be_clear_queues(struct be_adapter *adapter)
3938{
3939 be_mcc_queues_destroy(adapter);
3940 be_rx_cqs_destroy(adapter);
3941 be_tx_queues_destroy(adapter);
3942 be_evt_queues_destroy(adapter);
3943}
3944
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303945static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003946{
Sathya Perla191eb752012-02-23 18:50:13 +00003947 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3948 cancel_delayed_work_sync(&adapter->work);
3949 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3950 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303951}
3952
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003953static void be_cancel_err_detection(struct be_adapter *adapter)
3954{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303955 struct be_error_recovery *err_rec = &adapter->error_recovery;
3956
3957 if (!be_err_recovery_workq)
3958 return;
3959
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003960 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303961 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003962 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3963 }
3964}
3965
Sathya Perlac9c47142014-03-27 10:46:19 +05303966static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3967{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003968 struct net_device *netdev = adapter->netdev;
3969
Sathya Perlac9c47142014-03-27 10:46:19 +05303970 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3971 be_cmd_manage_iface(adapter, adapter->if_handle,
3972 OP_CONVERT_TUNNEL_TO_NORMAL);
3973
3974 if (adapter->vxlan_port)
3975 be_cmd_set_vxlan_port(adapter, 0);
3976
3977 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3978 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003979
3980 netdev->hw_enc_features = 0;
3981 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303982 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303983}
3984
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003985static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3986 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05003987{
3988 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003989 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3990 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05003991 u16 num_vf_qs = 1;
3992
Somnath Koturde2b1e02016-06-06 07:22:10 -04003993 /* Distribute the queue resources among the PF and it's VFs */
3994 if (num_vfs) {
3995 /* Divide the rx queues evenly among the VFs and the PF, capped
3996 * at VF-EQ-count. Any remainder queues belong to the PF.
3997 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05303998 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3999 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05004000
Somnath Koturde2b1e02016-06-06 07:22:10 -04004001 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4002 * RSS Tables per port. Provide RSS on VFs, only if number of
4003 * VFs requested is less than it's PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05004004 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004005 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05004006 num_vf_qs = 1;
4007 }
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004008
4009 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4010 * which are modifiable using SET_PROFILE_CONFIG cmd.
4011 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004012 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4013 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004014
4015 /* If RSS IFACE capability flags are modifiable for a VF, set the
4016 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4017 * more than 1 RSSQ is available for a VF.
4018 * Otherwise, provision only 1 queue pair for VF.
4019 */
4020 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4021 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4022 if (num_vf_qs > 1) {
4023 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4024 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4025 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4026 } else {
4027 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4028 BE_IF_FLAGS_DEFQ_RSS);
4029 }
4030 } else {
4031 num_vf_qs = 1;
4032 }
4033
4034 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4035 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4036 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4037 }
4038
4039 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4040 vft_res->max_rx_qs = num_vf_qs;
4041 vft_res->max_rss_qs = num_vf_qs;
4042 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4043 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4044
4045 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4046 * among the PF and it's VFs, if the fields are changeable
4047 */
4048 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4049 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4050
4051 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4052 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4053
4054 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4055 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4056
4057 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4058 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004059}
4060
Sathya Perlab7172412016-07-27 05:26:18 -04004061static void be_if_destroy(struct be_adapter *adapter)
4062{
4063 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4064
4065 kfree(adapter->pmac_id);
4066 adapter->pmac_id = NULL;
4067
4068 kfree(adapter->mc_list);
4069 adapter->mc_list = NULL;
4070
4071 kfree(adapter->uc_list);
4072 adapter->uc_list = NULL;
4073}
4074
/* Undo what be_setup() did: stop the worker, tear down VFs (if any), the
 * IFACE object and all queues, and disable MSI-X. The order matters:
 * workers are quiesced before queues/IFACE are destroyed. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	/* Drain any work items still queued on the driver workqueue */
	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4110
/* Create a FW IFACE object on behalf of each VF (proxy if_create in the
 * VF's domain, vf + 1). Capability flags default to BE_VF_IF_EN_FLAGS and,
 * on non-BE3 chips, are refined from the VF's FW profile when the query
 * succeeds. Returns 0 on success or the first failing cmd status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* NOTE(review): on a failed profile query, cap_flags
			 * keeps the value computed for the previous VF (or the
			 * default) — appears intentional best-effort behavior.
			 */
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4146
Sathya Perla39f1d942012-05-08 19:41:24 +00004147static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004148{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004149 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004150 int vf;
4151
Sathya Perla39f1d942012-05-08 19:41:24 +00004152 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4153 GFP_KERNEL);
4154 if (!adapter->vf_cfg)
4155 return -ENOMEM;
4156
Sathya Perla11ac75e2011-12-13 00:58:50 +00004157 for_all_vfs(adapter, vf_cfg, vf) {
4158 vf_cfg->if_handle = -1;
4159 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004160 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004161 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004162}
4163
/* Provision all VFs: create (or re-discover) their IFACE objects and MACs,
 * grant filter-management privileges, fetch spoofchk state, and finally
 * enable SR-IOV on the PCI level (for freshly created VFs). On any fatal
 * failure the partial setup is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	/* Non-zero old_vfs means VFs survived a previous driver unload;
	 * their FW objects already exist and are only queried, not created.
	 */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting (best effort) */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4256
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304257/* Converting function_mode bits on BE3 to SH mc_type enums */
4258
4259static u8 be_convert_mc_type(u32 function_mode)
4260{
Suresh Reddy66064db2014-06-23 16:41:29 +05304261 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304262 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304263 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304264 return FLEX10;
4265 else if (function_mode & VNIC_MODE)
4266 return vNIC2;
4267 else if (function_mode & UMC_ENABLED)
4268 return UMC;
4269 else
4270 return MC_NONE;
4271}
4272
/* On BE2/BE3 FW does not suggest the supported limits, so fill @res with
 * driver-computed limits derived from chip type, multi-channel mode,
 * SR-IOV state and the function capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* VFs get a smaller unicast-MAC (pmac) budget than the PF */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for a non-SRIOV, RSS-capable PF; otherwise
	 * res->max_rss_qs is left as-is (zeroed by the caller).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx chips do not support a default RSS queue */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4344
Sathya Perla30128032011-11-10 19:17:57 +00004345static void be_setup_init(struct be_adapter *adapter)
4346{
4347 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004348 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004349 adapter->if_handle = -1;
4350 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004351 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304352 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004353 if (be_physfn(adapter))
4354 adapter->cmd_privileges = MAX_PRIVILEGES;
4355 else
4356 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004357}
4358
Somnath Koturde2b1e02016-06-06 07:22:10 -04004359/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4360 * However, this HW limitation is not exposed to the host via any SLI cmd.
4361 * As a result, in the case of SRIOV and in particular multi-partition configs
4362 * the driver needs to calcuate a proportional share of RSS Tables per PF-pool
4363 * for distribution between the VFs. This self-imposed limit will determine the
4364 * no: of VFs for which RSS can be enabled.
4365 */
4366void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4367{
4368 struct be_port_resources port_res = {0};
4369 u8 rss_tables_on_port;
4370 u16 max_vfs = be_max_vfs(adapter);
4371
4372 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4373 RESOURCE_LIMITS, 0);
4374
4375 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4376
4377 /* Each PF Pool's RSS Tables limit =
4378 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4379 */
4380 adapter->pool_res.max_rss_tables =
4381 max_vfs * rss_tables_on_port / port_res.max_vfs;
4382}
4383
/* Query the PF-pool (SRIOV) limits from FW into adapter->pool_res, with
 * quirk handling for old BE3 FW and for VFs left enabled by a previous
 * driver unload. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	/* Pre-compute the per-PF-pool RSS table share (Skyhawk only) */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4422
Vasundhara Volamace40af2015-03-04 00:44:34 -05004423static void be_alloc_sriov_res(struct be_adapter *adapter)
4424{
4425 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004426 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004427 int status;
4428
4429 be_get_sriov_config(adapter);
4430
4431 if (!old_vfs)
4432 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4433
4434 /* When the HW is in SRIOV capable configuration, the PF-pool
4435 * resources are given to PF during driver load, if there are no
4436 * old VFs. This facility is not available in BE3 FW.
4437 * Also, this is done by FW in Lancer chip.
4438 */
4439 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004440 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004441 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004442 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004443 if (status)
4444 dev_err(&adapter->pdev->dev,
4445 "Failed to optimize SRIOV resources\n");
4446 }
4447}
4448
/* Populate adapter->res with the function's resource limits (computed
 * locally on BE2/BE3, queried from FW otherwise) and derive the initial
 * irq/queue configuration from them.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4498
/* Query assorted per-function configuration from FW: controller
 * attributes, fw cfg, FAT dump length, log level (BEx), WOL capability
 * (mirrored into PCI wake state), port name and the active profile id.
 * Only the first two queries are fatal on failure.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	/* Map the FW log level to the netdev msg_enable bitmap on BEx */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4536
Sathya Perla95046b92013-07-23 15:25:02 +05304537static int be_mac_setup(struct be_adapter *adapter)
4538{
4539 u8 mac[ETH_ALEN];
4540 int status;
4541
4542 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4543 status = be_cmd_get_perm_mac(adapter, mac);
4544 if (status)
4545 return status;
4546
4547 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4548 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304549 }
4550
Sathya Perla95046b92013-07-23 15:25:02 +05304551 return 0;
4552}
4553
/* Arm the periodic (1 sec) adapter worker on the driver-global workqueue
 * and record that it is scheduled.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4559
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304560static void be_destroy_err_recovery_workq(void)
4561{
4562 if (!be_err_recovery_workq)
4563 return;
4564
4565 flush_workqueue(be_err_recovery_workq);
4566 destroy_workqueue(be_err_recovery_workq);
4567 be_err_recovery_workq = NULL;
4568}
4569
/* Queue the error-detection work item to run after @delay msecs on the
 * dedicated recovery workqueue, and mark it scheduled. No-op if the
 * recovery workqueue does not exist.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4581
/* Create all adapter queues (EQs, TXQs, RX CQs, MCC queues) and publish
 * the real RX/TX ring counts to the net stack. On the first failure the
 * error is logged and returned.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4616
Ajit Khaparde62219062016-02-10 22:45:53 +05304617static int be_if_create(struct be_adapter *adapter)
4618{
4619 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4620 u32 cap_flags = be_if_cap_flags(adapter);
4621 int status;
4622
Sathya Perlab7172412016-07-27 05:26:18 -04004623 /* alloc required memory for other filtering fields */
4624 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4625 sizeof(*adapter->pmac_id), GFP_KERNEL);
4626 if (!adapter->pmac_id)
4627 return -ENOMEM;
4628
4629 adapter->mc_list = kcalloc(be_max_mc(adapter),
4630 sizeof(*adapter->mc_list), GFP_KERNEL);
4631 if (!adapter->mc_list)
4632 return -ENOMEM;
4633
4634 adapter->uc_list = kcalloc(be_max_uc(adapter),
4635 sizeof(*adapter->uc_list), GFP_KERNEL);
4636 if (!adapter->uc_list)
4637 return -ENOMEM;
4638
Sathya Perlae2617682016-06-22 08:54:54 -04004639 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304640 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4641
4642 en_flags &= cap_flags;
4643 /* will enable all the needed filter flags in be_open() */
4644 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4645 &adapter->if_handle, 0);
4646
Sathya Perlab7172412016-07-27 05:26:18 -04004647 if (status)
4648 return status;
4649
4650 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304651}
4652
/* Tear down and re-create the IFACE and all queues (e.g. after a change in
 * queue configuration), re-enabling MSI-X when possible and restoring the
 * worker and the netdev's open state afterwards.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	/* Re-enable MSI-X unless the vectors were kept (RoCE sharing) */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4695
/* Parse the leading major number out of a "major.minor..." FW version
 * string. Returns 0 when no integer can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4706
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304707/* If it is error recovery, FLR the PF
4708 * Else if any VFs are already enabled don't FLR the PF
4709 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004710static bool be_reset_required(struct be_adapter *adapter)
4711{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304712 if (be_error_recovering(adapter))
4713 return true;
4714 else
4715 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004716}
4717
/* Wait for the FW to be ready and perform the required initialization:
 * clear recorded error state, FLR the function when required, issue
 * FW_INIT and re-enable interrupts for other ULPs.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4749
/* Bring up the NIC function from scratch: FW handshake, resource
 * discovery/provisioning, MSI-x, interface/queue creation, MAC and
 * flow-control configuration, optional VF setup, and finally the
 * periodic worker. The ordering of these steps is significant.
 * Returns 0 on success; failures after be_get_config() tear down any
 * partially created state via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	/* Distribute SR-IOV resources before querying what we own */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 with pre-4.0 firmware is known to have IRQ problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting flow control fails, fall back to reading the
	 * values currently programmed in HW.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4854
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling fallback (netconsole etc.): re-arm every event queue and
 * kick its NAPI context so completions are processed without IRQs.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4868
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004869int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4870{
4871 const struct firmware *fw;
4872 int status;
4873
4874 if (!netif_running(adapter->netdev)) {
4875 dev_err(&adapter->pdev->dev,
4876 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304877 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004878 }
4879
4880 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4881 if (status)
4882 goto fw_exit;
4883
4884 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4885
4886 if (lancer_chip(adapter))
4887 status = lancer_fw_download(adapter, fw);
4888 else
4889 status = be_fw_download(adapter, fw);
4890
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004891 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304892 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004893
Ajit Khaparde84517482009-09-04 03:12:16 +00004894fw_exit:
4895 release_firmware(fw);
4896 return status;
4897}
4898
/* ndo_bridge_setlink handler: program the HW e-switch forwarding mode
 * (VEB or VEPA) from the IFLA_BRIDGE_MODE attribute of a netlink
 * bridge-setlink request. Only supported while SR-IOV is enabled.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		/* BE3 does not accept VEPA mode */
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is handled */
		return status;
	}
err:
	/* NOTE(review): this label is also reached when the request
	 * carries no IFLA_BRIDGE_MODE attribute at all; in that case
	 * status is still 0, so a failure message is logged while 0
	 * (success) is returned — confirm whether that is intended.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4948
4949static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004950 struct net_device *dev, u32 filter_mask,
4951 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004952{
4953 struct be_adapter *adapter = netdev_priv(dev);
4954 int status = 0;
4955 u8 hsw_mode;
4956
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004957 /* BE and Lancer chips support VEB mode only */
4958 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Ivan Vecera84317062016-02-11 12:42:26 +01004959 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4960 if (!pci_sriov_get_totalvfs(adapter->pdev))
4961 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004962 hsw_mode = PORT_FWD_TYPE_VEB;
4963 } else {
4964 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004965 adapter->if_handle, &hsw_mode,
4966 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004967 if (status)
4968 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004969
4970 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4971 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004972 }
4973
4974 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4975 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004976 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004977 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004978}
4979
Sathya Perlab7172412016-07-27 05:26:18 -04004980static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4981 void (*func)(struct work_struct *))
4982{
4983 struct be_cmd_work *work;
4984
4985 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4986 if (!work) {
4987 dev_err(&adapter->pdev->dev,
4988 "be_work memory allocation failed\n");
4989 return NULL;
4990 }
4991
4992 INIT_WORK(&work->work, func);
4993 work->adapter = adapter;
4994 return work;
4995}
4996
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004997/* VxLAN offload Notes:
4998 *
4999 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5000 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5001 * is expected to work across all types of IP tunnels once exported. Skyhawk
5002 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305003 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5004 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5005 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005006 *
5007 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5008 * adds more than one port, disable offloads and don't re-enable them again
5009 * until after all the tunnels are removed.
5010 */
/* Deferred handler for ndo_udp_tunnel_add: enable VxLAN offloads for the
 * first VxLAN UDP port added. Re-adding the same port only bumps an alias
 * count; adding a second, different port disables offloads entirely
 * (the HW supports a single VxLAN dport — see the notes above).
 * Frees the work item before returning.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	/* Same port added again while active: just track the alias */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	/* A different port while offloads are active: turn offloads off */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* More than one port already seen: keep offloads disabled until
	 * all tunnels are removed.
	 */
	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Advertise tunnel offload capabilities to the stack */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}
5066
/* Deferred handler for ndo_udp_tunnel_del: drop a reference to the
 * offloaded VxLAN port and disable offloads once the port (and all of
 * its aliases) is gone. Note the label ordering: "done" decrements the
 * port count, "out" only frees the work item.
 */
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	/* Not the offloaded port: only the pending-port count drops */
	if (adapter->vxlan_port != port)
		goto done;

	/* Same port still referenced by another add: keep offloads on */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}
5092
5093static void be_cfg_vxlan_port(struct net_device *netdev,
5094 struct udp_tunnel_info *ti,
5095 void (*func)(struct work_struct *))
5096{
5097 struct be_adapter *adapter = netdev_priv(netdev);
5098 struct be_cmd_work *cmd_work;
5099
5100 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5101 return;
5102
5103 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5104 return;
5105
5106 cmd_work = be_alloc_work(adapter, func);
5107 if (cmd_work) {
5108 cmd_work->info.vxlan_port = ti->port;
5109 queue_work(be_wq, &cmd_work->work);
5110 }
5111}
5112
5113static void be_del_vxlan_port(struct net_device *netdev,
5114 struct udp_tunnel_info *ti)
5115{
5116 be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
5117}
5118
5119static void be_add_vxlan_port(struct net_device *netdev,
5120 struct udp_tunnel_info *ti)
5121{
5122 be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
Sathya Perlac9c47142014-03-27 10:46:19 +05305123}
Joe Stringer725d5482014-11-13 16:38:13 -08005124
Jesse Gross5f352272014-12-23 22:37:26 -08005125static netdev_features_t be_features_check(struct sk_buff *skb,
5126 struct net_device *dev,
5127 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005128{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305129 struct be_adapter *adapter = netdev_priv(dev);
5130 u8 l4_hdr = 0;
5131
5132 /* The code below restricts offload features for some tunneled packets.
5133 * Offload features for normal (non tunnel) packets are unchanged.
5134 */
5135 if (!skb->encapsulation ||
5136 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5137 return features;
5138
5139 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5140 * should disable tunnel offload features if it's not a VxLAN packet,
5141 * as tunnel offloads have been enabled only for VxLAN. This is done to
5142 * allow other tunneled traffic like GRE work fine while VxLAN
5143 * offloads are configured in Skyhawk-R.
5144 */
5145 switch (vlan_get_protocol(skb)) {
5146 case htons(ETH_P_IP):
5147 l4_hdr = ip_hdr(skb)->protocol;
5148 break;
5149 case htons(ETH_P_IPV6):
5150 l4_hdr = ipv6_hdr(skb)->nexthdr;
5151 break;
5152 default:
5153 return features;
5154 }
5155
5156 if (l4_hdr != IPPROTO_UDP ||
5157 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5158 skb->inner_protocol != htons(ETH_P_TEB) ||
5159 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5160 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08005161 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305162
5163 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005164}
Sathya Perlac9c47142014-03-27 10:46:19 +05305165
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305166static int be_get_phys_port_id(struct net_device *dev,
5167 struct netdev_phys_item_id *ppid)
5168{
5169 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5170 struct be_adapter *adapter = netdev_priv(dev);
5171 u8 *id;
5172
5173 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5174 return -ENOSPC;
5175
5176 ppid->id[0] = adapter->hba_port_num + 1;
5177 id = &ppid->id[1];
5178 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5179 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5180 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5181
5182 ppid->id_len = id_len;
5183
5184 return 0;
5185}
5186
Sathya Perlab7172412016-07-27 05:26:18 -04005187static void be_set_rx_mode(struct net_device *dev)
5188{
5189 struct be_adapter *adapter = netdev_priv(dev);
5190 struct be_cmd_work *work;
5191
5192 work = be_alloc_work(adapter, be_work_set_rx_mode);
5193 if (work)
5194 queue_work(be_wq, &work->work);
5195}
5196
/* net_device_ops callbacks registered for be2net interfaces */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};
5227
/* Initialize netdev feature flags, GSO limit, and the netdev/ethtool
 * ops before the device is registered with the stack.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads; RXHASH only when the interface
	 * capabilities include RSS.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled-by-default set: everything above plus VLAN RX/filter */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5254
Kalesh AP87ac1a52015-02-23 04:20:15 -05005255static void be_cleanup(struct be_adapter *adapter)
5256{
5257 struct net_device *netdev = adapter->netdev;
5258
5259 rtnl_lock();
5260 netif_device_detach(netdev);
5261 if (netif_running(netdev))
5262 be_close(netdev);
5263 rtnl_unlock();
5264
5265 be_clear(adapter);
5266}
5267
Kalesh AP484d76f2015-02-23 04:20:14 -05005268static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005269{
Kalesh APd0e1b312015-02-23 04:20:12 -05005270 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005271 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005272
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005273 status = be_setup(adapter);
5274 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005275 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005276
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005277 rtnl_lock();
5278 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005279 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005280 rtnl_unlock();
5281
5282 if (status)
5283 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005284
Kalesh APd0e1b312015-02-23 04:20:12 -05005285 netif_device_attach(netdev);
5286
Kalesh AP484d76f2015-02-23 04:20:14 -05005287 return 0;
5288}
5289
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305290static void be_soft_reset(struct be_adapter *adapter)
5291{
5292 u32 val;
5293
5294 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5295 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5296 val |= SLIPORT_SOFTRESET_SR_MASK;
5297 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5298}
5299
/* Decide whether the current HW error meets the recovery criteria:
 * POST stage must report a recoverable error with a nonzero code, enough
 * time must have passed since driver load and since the last recovery,
 * and the error code must differ from the previous one (no back-to-back
 * identical TPE errors). On success, records the recovery timestamp and
 * error code in the adapter's error_recovery state.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
				msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
				msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* Too soon after driver load */
	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	/* Too soon after the previous recovery */
	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5345
/* One step of the TPE (recoverable HW error) recovery state machine for
 * BEx/Skyhawk. Each call advances recovery_state and sets resched_delay
 * for the caller's next invocation:
 *   NONE -> DETECT -> (PF0: RESET -> PRE_POLL | others: PRE_POLL)
 *   -> REINIT (done).
 * Returns 0 once recovery can proceed (REINIT reached), -EAGAIN while a
 * further step is pending, or a negative error on failure.
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-zero PFs skip the reset step and wait for polling */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5416
/* Recover the adapter from a HW error: on BEx/Skyhawk first drive the
 * TPE recovery state machine (may return -EAGAIN while multi-step
 * recovery is in progress), then wait for FW readiness and rebuild the
 * function via be_cleanup()/be_resume(). BE_FLAGS_TRY_RECOVERY is set
 * for the duration of the rebuild and cleared on success.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		/* BEx/SH: TPE recovery must be supported and not disabled */
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
5450
/* Periodic error-detection worker: probe for HW errors and, when one is
 * present, attempt recovery. Reschedules itself with a delay chosen from
 * the recovery outcome (default detection delay, the TPE state-machine
 * delay, or the Lancer retry delay); gives up permanently only after
 * Lancer retries are exhausted or on unrecoverable failure.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if PF have allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}
5506
Vasundhara Volam21252372015-02-06 08:18:42 -05005507static void be_log_sfp_info(struct be_adapter *adapter)
5508{
5509 int status;
5510
5511 status = be_cmd_query_sfp_info(adapter);
5512 if (!status) {
5513 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305514 "Port %c: %s Vendor: %s part no: %s",
5515 adapter->port_name,
5516 be_misconfig_evt_port_state[adapter->phy_state],
5517 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005518 adapter->phy.vendor_pn);
5519 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305520 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005521}
5522
/* Periodic (1 s) housekeeping work item: die-temperature polling,
 * statistics refresh, RX-queue replenishment, EQ-delay updates and
 * deferred SFP logging.  Re-queues itself on be_wq at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Die temperature is read only on the PF, once every
	 * be_get_temp_freq invocations (MODULO requires a power of 2).
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a fresh stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	/* Log SFP details if a misconfiguration async event was seen */
	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5571
/* Undo be_map_pci_bars(): unmap CSR, doorbell and (when it was actually
 * iomapped, tracked by pcicfg_mapped) the PCICFG BAR.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	/* pcicfg may alias into the db mapping (VF case) — only unmap
	 * when it was obtained from pci_iomap()
	 */
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}
5581
/* Return the PCI BAR index that holds the doorbell region:
 * BAR 0 on Lancer chips and on VFs, BAR 4 everywhere else.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5589
/* Record the RoCE doorbell window (start/size within the doorbell BAR)
 * for the RoCE driver; only Skyhawk supports RoCE.  Always returns 0.
 */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
5601
/* Map the PCI BARs used by the driver (CSR for BEx PFs, doorbell, and
 * PCICFG for BEx/Skyhawk) and derive SLI family/virtfn from config
 * space.  Returns 0 on success or -ENOMEM; partially-created mappings
 * are released via be_unmap_pci_bars() on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI interface register identifies chip family and PF/VF mode */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR space (BAR 2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			/* VF: pcicfg lives at an offset inside the doorbell
			 * mapping; remember it is not a separate iomap
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5646
5647static void be_drv_cleanup(struct be_adapter *adapter)
5648{
5649 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5650 struct device *dev = &adapter->pdev->dev;
5651
5652 if (mem->va)
5653 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5654
5655 mem = &adapter->rx_filter;
5656 if (mem->va)
5657 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5658
5659 mem = &adapter->stats_cmd;
5660 if (mem->va)
5661 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5662}
5663
/* Allocate and initialize various fields in be_adapter struct */
/* Allocates the DMA-coherent command buffers (mailbox, rx-filter,
 * stats), initializes locks/completions/work items and driver defaults.
 * Returns 0 or -ENOMEM; on failure everything allocated so far is freed
 * via the goto chain.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on chip generation / firmware API */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	/* Error-recovery state machine starts idle */
	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5738
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe — RoCE, interrupts, error-detection work, netdev, HW state,
 * BARs, DMA buffers, PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Skip FLR when VFs are still assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5771
Arnd Bergmann9a032592015-05-18 23:06:45 +02005772static ssize_t be_hwmon_show_temp(struct device *dev,
5773 struct device_attribute *dev_attr,
5774 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305775{
5776 struct be_adapter *adapter = dev_get_drvdata(dev);
5777
5778 /* Unit: millidegree Celsius */
5779 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5780 return -EIO;
5781 else
5782 return sprintf(buf, "%u\n",
5783 adapter->hwmon_info.be_on_die_temp * 1000);
5784}
5785
/* hwmon sysfs attribute: read-only temp1_input backed by
 * be_hwmon_show_temp()
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups for devm_hwmon_device_register_with_groups() */
ATTRIBUTE_GROUPS(be_hwmon);
5795
Sathya Perlad3791422012-09-28 04:39:44 +00005796static char *mc_name(struct be_adapter *adapter)
5797{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305798 char *str = ""; /* default */
5799
5800 switch (adapter->mc_type) {
5801 case UMC:
5802 str = "UMC";
5803 break;
5804 case FLEX10:
5805 str = "FLEX10";
5806 break;
5807 case vNIC1:
5808 str = "vNIC-1";
5809 break;
5810 case nPAR:
5811 str = "nPAR";
5812 break;
5813 case UFP:
5814 str = "UFP";
5815 break;
5816 case vNIC2:
5817 str = "vNIC-2";
5818 break;
5819 default:
5820 str = "";
5821 }
5822
5823 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005824}
5825
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5830
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005831static inline char *nic_name(struct pci_dev *pdev)
5832{
5833 switch (pdev->device) {
5834 case OC_DEVICE_ID1:
5835 return OC_NAME;
5836 case OC_DEVICE_ID2:
5837 return OC_NAME_BE;
5838 case OC_DEVICE_ID3:
5839 case OC_DEVICE_ID4:
5840 return OC_NAME_LANCER;
5841 case BE_DEVICE_ID2:
5842 return BE3_NAME;
5843 case OC_DEVICE_ID5:
5844 case OC_DEVICE_ID6:
5845 return OC_NAME_SH;
5846 default:
5847 return BE_NAME;
5848 }
5849}
5850
/* PCI probe callback: enable the device, set up DMA masks, map BARs,
 * allocate driver state, bring up the HW (be_setup), register the
 * netdev and ancillary services (RoCE, hwmon, error detection).
 * On any failure, unwinds through the labelled goto chain.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5947
/* Legacy PM suspend callback: quiesce interrupts and the error-detection
 * work, tear down the data path (be_cleanup), then put the PCI device
 * into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5962
/* Legacy PM resume callback: re-enable the PCI device, restore config
 * space, rebuild the data path (be_resume) and restart error detection.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
5982
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop background work, detach the netdev and
 * issue a function-level reset so the HW stops all DMA before reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6003
/* EEH/AER error_detected handler: mark the EEH error (once), stop the
 * error-detection work, tear down the data path and disable the device.
 * Returns DISCONNECT on permanent failure, otherwise NEED_RESET.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Run the teardown only on the first EEH notification */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6037
/* EEH/AER slot_reset handler: re-enable the device, restore config
 * space, wait for firmware readiness and clear the driver error state.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6063
/* EEH/AER resume handler: rebuild the data path after a successful slot
 * reset, re-add RoCE and restart the error-detection work.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6084
/* sysfs sriov_configure callback: enable num_vfs VFs (0 disables),
 * redistributing PF-pool resources across the requested VF count on
 * Skyhawk.  Returns the VF count enabled, 0, or a negative errno.
 *
 * NOTE(review): be_vf_clear() runs before the pci_vfs_assigned() -EBUSY
 * check below — presumably be_vf_clear() itself is safe against assigned
 * VFs; confirm against its definition.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6139
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6145
/* PCI driver descriptor: probe/remove, legacy PM, shutdown, SR-IOV
 * configuration and EEH error handling entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6157
6158static int __init be_init_module(void)
6159{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306160 int status;
6161
Joe Perches8e95a202009-12-03 07:58:21 +00006162 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6163 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006164 printk(KERN_WARNING DRV_NAME
6165 " : Module param rx_frag_size must be 2048/4096/8192."
6166 " Using 2048\n");
6167 rx_frag_size = 2048;
6168 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006169
Vasundhara Volamace40af2015-03-04 00:44:34 -05006170 if (num_vfs > 0) {
6171 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6172 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6173 }
6174
Sathya Perlab7172412016-07-27 05:26:18 -04006175 be_wq = create_singlethread_workqueue("be_wq");
6176 if (!be_wq) {
6177 pr_warn(DRV_NAME "workqueue creation failed\n");
6178 return -1;
6179 }
6180
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306181 be_err_recovery_workq =
6182 create_singlethread_workqueue("be_err_recover");
6183 if (!be_err_recovery_workq)
6184 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6185
6186 status = pci_register_driver(&be_driver);
6187 if (status) {
6188 destroy_workqueue(be_wq);
6189 be_destroy_err_recovery_workq();
6190 }
6191 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006192}
6193module_init(be_init_module);
6194
/* Module exit: unregister the PCI driver first (flushing per-adapter
 * work), then destroy the module-wide workqueues.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);