blob: 9a94840c57574b328cd06f696ebd24a0881fe370 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
47struct workqueue_struct *be_err_recovery_workq;
48
/* PCI device IDs claimed by this driver: BladeEngine (BE2/BE3) and
 * OneConnect (Lancer/Skyhawk) adapters. Terminated by the zero entry.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
60MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
63struct workqueue_struct *be_wq;
64
Ajit Khaparde7c185272010-07-29 06:16:33 +000065/* UE Status Low CSR */
/* Human-readable names for the bits of the UE (Unrecoverable Error)
 * Status Low CSR, indexed by bit position. Trailing spaces in some
 * entries are intentional and preserved from the hardware spec naming.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
/* Human-readable names for the bits of the UE Status High CSR,
 * indexed by bit position; "Unknown" is the catch-all final entry.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500137#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
138 BE_IF_FLAGS_BROADCAST | \
139 BE_IF_FLAGS_MULTICAST | \
140 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
/* Allocate a DMA-coherent, zeroed ring of @len entries of @entry_size
 * bytes for @q. Returns 0 on success or -ENOMEM if the coherent
 * allocation fails; on failure @q is left zeroed with no memory attached.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	/* zalloc: HW rings must start out zeroed */
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
168
/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * PCI-config-space membar control register. Reads the current state
 * first and writes back only when the state actually needs to change.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;	/* already in the requested state; skip the write */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
187
/* Enable/disable interrupts, preferring the FW mailbox command and
 * falling back to the direct PCI-config register write if the command
 * fails. No-op on Lancer chips and while an EEH error is flagged.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* don't touch the device during EEH error recovery */
	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
203
/* Ring the RX queue doorbell: tell HW that @posted new buffers are
 * available on RQ @qid. Skipped entirely when a HW error is flagged.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* ensure descriptor writes are visible before the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
217
/* Ring the TX queue doorbell for @txo: notify HW that @posted new
 * wrbs were placed on the queue. Skipped when a HW error is flagged.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* ensure descriptor writes are visible before the doorbell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
232
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed events, optionally re-arm the EQ (@arm), clear the interrupt
 * (@clear_int) and program the R2I delay multiplier encoding
 * (@eq_delay_mult_enc) used for interrupt moderation.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* bail after building val but before touching the device */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
254
/* Ring the completion-queue doorbell for CQ @qid: acknowledge
 * @num_popped processed completions and optionally re-arm the CQ.
 * Non-static: also used by other driver files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the MMIO write when a HW error is flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
271
/* Program @mac as the primary device MAC. If @mac is already present in
 * the uc-list, reuse that entry's pmac_id instead of issuing another
 * PMAC_ADD to the FW. Returns 0 on success or a FW-command error code.
 */
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
				     mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	/* pmac_id[0] receives the id of the newly programmed MAC */
	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}
289
/* Remove the MAC identified by @pmac_id from the interface, unless that
 * same pmac_id is still referenced by an entry in the uc-list (in which
 * case deleting it would break unicast filtering).
 */
static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
303
/* ndo_set_mac_address handler: program a new primary MAC on the device.
 *
 * Adds the new MAC (and deletes the previously programmed one) under
 * rx_filter_lock, then queries the FW for the active MAC to decide
 * whether the change really took effect -- a VF without FILTMGMT
 * privilege may have its PMAC_ADD rejected even though the PF already
 * provisioned the same MAC (which is still a success).
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the FW did not activate the requested MAC, or a FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
368
Sathya Perlaca34fe32012-11-06 17:48:56 +0000369/* BE2 supports only v0 cmd */
370static void *hw_stats_from_cmd(struct be_adapter *adapter)
371{
372 if (BE2_chip(adapter)) {
373 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
374
375 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500376 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000377 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
378
379 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500380 } else {
381 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
382
383 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000384 }
385}
386
387/* BE2 supports only v0 cmd */
388static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
389{
390 if (BE2_chip(adapter)) {
391 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
392
393 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500394 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000395 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
396
397 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500398 } else {
399 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
400
401 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000402 }
403}
404
/* Copy v0-format (BE2) HW stats from the FW response buffer into the
 * driver's be_drv_stats, after converting the buffer from LE to CPU
 * byte order. Port-specific counters come from this function's port;
 * jabber events are split per-port in the v0 rxf stats.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold both */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
453
/* Copy v1-format (BE3) HW stats from the FW response buffer into the
 * driver's be_drv_stats, after LE-to-CPU conversion. Unlike v0, v1
 * carries per-port jabber/priority-pause counters directly.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
499
/* Copy v2-format HW stats from the FW response buffer into the driver's
 * be_drv_stats, after LE-to-CPU conversion. v2 additionally carries
 * RoCE counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
553
/* Copy Lancer pport stats from the FW response buffer into the driver's
 * be_drv_stats, after LE-to-CPU conversion. Lancer exposes 64-bit
 * counters; only the low 32 bits (_lo) are copied for those fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan filtering into one counter, as for v0 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590
/* Accumulate a 16-bit HW counter @val into the 32-bit accumulator @acc.
 * The low 16 bits of *acc mirror the last HW reading; a new reading
 * smaller than the previous one means the 16-bit counter wrapped, so
 * add 65536 to keep the 32-bit total monotonic. The final store uses
 * ACCESS_ONCE so concurrent readers see a single consistent update.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
602
Jingoo Han4188e7d2013-08-05 18:02:02 +0900603static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530604 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000605{
606 if (!BEx_chip(adapter))
607 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
608 else
609 /* below erx HW counter can actually wrap around after
610 * 65535. Driver accumulates a 32-bit value
611 */
612 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
613 (u16)erx_stat);
614}
615
/* Parse the raw FW stats response into driver counters, dispatching on
 * chip family (Lancer pport stats vs. BE v0/v1/v2 layouts), then fold
 * the per-RX-queue erx drop counters in. Non-static: called from other
 * driver files after a GET_STATS command completes.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
641
/* ndo_get_stats64 handler: aggregate the per-queue SW counters and the
 * f/w-mirrored error counters (adapter->drv_stats) into @stats.
 * Runs lockless and may race with the datapath updating the counters;
 * the u64_stats seqcount retry loops guarantee consistent 64-bit reads
 * even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* re-sample if the writer touched the counters meanwhile */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
709
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000710void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700711{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712 struct net_device *netdev = adapter->netdev;
713
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000714 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000715 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000716 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700717 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000718
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530719 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000720 netif_carrier_on(netdev);
721 else
722 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200723
724 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700725}
726
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500727static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700728{
Sathya Perla3c8def92011-06-12 20:01:58 +0000729 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530730 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000731
Sathya Perlaab1594e2011-07-25 19:10:15 +0000732 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000733 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500734 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530735 stats->tx_pkts += tx_pkts;
736 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
737 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000738 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700739}
740
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500741/* Returns number of WRBs needed for the skb */
742static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700743{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500744 /* +1 for the header wrb */
745 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700746}
747
748static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
749{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500750 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
751 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
752 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
753 wrb->rsvd0 = 0;
754}
755
756/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
757 * to avoid the swap and shift/mask operations in wrb_fill().
758 */
759static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
760{
761 wrb->frag_pa_hi = 0;
762 wrb->frag_pa_lo = 0;
763 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000764 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765}
766
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000767static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530768 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000769{
770 u8 vlan_prio;
771 u16 vlan_tag;
772
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100773 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000774 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
775 /* If vlan priority provided by OS is NOT in available bmap */
776 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
777 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500778 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000779
780 return vlan_tag;
781}
782
Sathya Perlac9c47142014-03-27 10:46:19 +0530783/* Used only for IP tunnel packets */
784static u16 skb_inner_ip_proto(struct sk_buff *skb)
785{
786 return (inner_ip_hdr(skb)->version == 4) ?
787 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
788}
789
790static u16 skb_ip_proto(struct sk_buff *skb)
791{
792 return (ip_hdr(skb)->version == 4) ?
793 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
794}
795
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530796static inline bool be_is_txq_full(struct be_tx_obj *txo)
797{
798 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
799}
800
801static inline bool be_can_txq_wake(struct be_tx_obj *txo)
802{
803 return atomic_read(&txo->q.used) < txo->q.len / 2;
804}
805
806static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
807{
808 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
809}
810
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530811static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
812 struct sk_buff *skb,
813 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530815 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000817 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530818 BE_WRB_F_SET(wrb_params->features, LSO, 1);
819 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000820 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530821 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530823 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530824 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530825 proto = skb_inner_ip_proto(skb);
826 } else {
827 proto = skb_ip_proto(skb);
828 }
829 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530830 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530831 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530832 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 }
834
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100835 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530836 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
837 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838 }
839
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530840 BE_WRB_F_SET(wrb_params->features, CRC, 1);
841}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500842
/* Program the TX header WRB (@hdr) from the precomputed @wrb_params:
 * checksum/LSO/VLAN offload bits, event bit, WRB count and total pkt
 * length. The hdr is zeroed first since unused fields must be 0.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload requests */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* large-send offload */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes the pkt to the BMC as well (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
879
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000880static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530881 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000882{
883 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500884 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000885
Sathya Perla7101e112010-03-22 20:41:12 +0000886
Sathya Perlaf986afc2015-02-06 08:18:43 -0500887 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
888 (u64)le32_to_cpu(wrb->frag_pa_lo);
889 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000890 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500891 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000892 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500893 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000894 }
895}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530897/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530898static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700899{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530900 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530902 queue_head_inc(&txo->q);
903 return head;
904}
905
/* Set up the WRB header for xmit: fill the hdr slot (reserved earlier at
 * index @head by be_tx_get_wrb_hdr()), record the skb for freeing on TX
 * completion, and account the packet's WRBs in the queue bookkeeping.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* convert the hdr in place to little-endian for the HW */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the hdr slot must be free; the skb is freed on TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* pend_wrb_cnt is flushed to HW by be_xmit_flush() */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700926
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530927/* Setup a WRB fragment (buffer descriptor) for xmit */
928static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
929 int len)
930{
931 struct be_eth_wrb *wrb;
932 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530934 wrb = queue_head_node(txq);
935 wrb_fill(wrb, busaddr, len);
936 queue_head_inc(txq);
937}
938
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer to this packet's hdr WRB slot */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* walk the frag WRBs that were set up so far (@copied bytes) and
	 * undo their DMA mappings; only the first frag may have been
	 * mapped with dma_map_single() (skb linear part)
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* the walk advanced head again; park it back at the hdr slot so
	 * the slots are reused by the next packet
	 */
	txq->head = head;
}
966
967/* Enqueue the given packet for transmit. This routine allocates WRBs for the
968 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
969 * of WRBs used up by the packet.
970 */
971static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
972 struct sk_buff *skb,
973 struct be_wrb_params *wrb_params)
974{
975 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
976 struct device *dev = &adapter->pdev->dev;
977 struct be_queue_info *txq = &txo->q;
978 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530979 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530980 dma_addr_t busaddr;
981 int len;
982
983 head = be_tx_get_wrb_hdr(txo);
984
985 if (skb->len > skb->data_len) {
986 len = skb_headlen(skb);
987
988 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
989 if (dma_mapping_error(dev, busaddr))
990 goto dma_err;
991 map_single = true;
992 be_tx_setup_wrb_frag(txo, busaddr, len);
993 copied += len;
994 }
995
996 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
997 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
998 len = skb_frag_size(frag);
999
1000 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1001 if (dma_mapping_error(dev, busaddr))
1002 goto dma_err;
1003 be_tx_setup_wrb_frag(txo, busaddr, len);
1004 copied += len;
1005 }
1006
1007 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1008
1009 be_tx_stats_update(txo, skb);
1010 return wrb_cnt;
1011
1012dma_err:
1013 adapter->drv_stats.dma_map_errors++;
1014 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001015 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016}
1017
Sathya Perlaf7062ee2015-02-06 08:18:35 -05001018static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
1019{
1020 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
1021}
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001024 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301025 struct be_wrb_params
1026 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +00001027{
1028 u16 vlan_tag = 0;
1029
1030 skb = skb_share_check(skb, GFP_ATOMIC);
1031 if (unlikely(!skb))
1032 return skb;
1033
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001034 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +00001035 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301036
1037 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1038 if (!vlan_tag)
1039 vlan_tag = adapter->pvid;
1040 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1041 * skip VLAN insertion
1042 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301043 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301044 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001045
1046 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001047 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1048 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001049 if (unlikely(!skb))
1050 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051 skb->vlan_tci = 0;
1052 }
1053
1054 /* Insert the outer VLAN, if any */
1055 if (adapter->qnq_vid) {
1056 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001057 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1058 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001059 if (unlikely(!skb))
1060 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301061 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001062 }
1063
Somnath Kotur93040ae2012-06-26 22:32:10 +00001064 return skb;
1065}
1066
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001067static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1068{
1069 struct ethhdr *eh = (struct ethhdr *)skb->data;
1070 u16 offset = ETH_HLEN;
1071
1072 if (eh->h_proto == htons(ETH_P_IPV6)) {
1073 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1074
1075 offset += sizeof(struct ipv6hdr);
1076 if (ip6h->nexthdr != NEXTHDR_TCP &&
1077 ip6h->nexthdr != NEXTHDR_UDP) {
1078 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301079 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001080
1081 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1082 if (ehdr->hdrlen == 0xff)
1083 return true;
1084 }
1085 }
1086 return false;
1087}
1088
1089static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1090{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001091 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001092}
1093
Sathya Perla748b5392014-05-09 13:29:13 +05301094static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001095{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001096 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001097}
1098
/* TX workarounds for HW bugs on Lancer and BEx chips, applied before WRB
 * setup. May modify, reallocate or free @skb. Returns the skb to xmit,
 * or NULL when the pkt was dropped or lost (caller must not touch @skb).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* strip the padding so the frame len matches tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* pkt deliberately dropped: free it here */
	dev_kfree_skb_any(skb);
err:
	/* skb already consumed (by be_insert_vlan_in_pkt() on failure) */
	return NULL;
}
1167
/* Chip-agnostic TX workaround entry point: pads runt pkts, applies the
 * Lancer/BEx-specific fixups, and trims over-long skbs. Returns the skb
 * to xmit, or NULL when it was dropped (skb already freed).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* skb_put_padto() frees the skb on failure */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	/* pskb_trim() is a no-op (returns 0) when skb->len is in range */
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1199
/* Ring the TX doorbell for all WRBs queued since the last flush. The
 * last request is made eventable (so a TX completion is raised) and, on
 * BEx, a dummy WRB is appended when the pending WRB count is odd (the HW
 * requires an even count; Lancer does not).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* re-encode the last request's wrb count (+1 for dummy) */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1223
/* OS2BMC related */

/* well-known UDP ports whose traffic the BMC may want mirrored */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* The is_*_allowed_on_bmc()/is_*_filt_enabled() helpers below test the
 * BMC filter bitmap (adapter->bmc_filt_mask) to decide whether a given
 * class of pkt should be mirrored to the BMC (see be_send_pkt_to_bmc()).
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* ether_addr_equal() replaces the long-removed compare_ether_addr()
 * (which returned 0 on match, hence the old negation)
 */
#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) && \
	 ether_addr_equal(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1277
/* Decide whether the pkt should also be mirrored to the BMC (OS2BMC),
 * based on the pkt class and the BMC filter mask. Only multicast-dest
 * pkts (which include broadcast) are candidates. When mirroring, the
 * VLAN tag must be inlined in the pkt data, so *skb may be reallocated
 * (and may come back NULL on failure - caller must check).
 * Returns true when the pkt is to be sent to the BMC as well.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	/* mc/bc/ARP classes decided purely by the filter mask */
	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 NDP: router/neighbour advertisements have their own filters */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS recognised by UDP destination port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1347
/* ndo_start_xmit handler: map the skb into TX WRBs on the queue chosen
 * by the stack, optionally enqueue a second (OS2BMC) copy for the BMC,
 * and ring the TX doorbell when flushing is required.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* Defer the doorbell while the stack promises more packets */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* Extra ref: the skb is now owned by two TX
			 * completions (wire copy + BMC copy).
			 */
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1398
1399static int be_change_mtu(struct net_device *netdev, int new_mtu)
1400{
1401 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301402 struct device *dev = &adapter->pdev->dev;
1403
1404 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1405 dev_info(dev, "MTU must be between %d and %d bytes\n",
1406 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 return -EINVAL;
1408 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301409
1410 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301411 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 netdev->mtu = new_mtu;
1413 return 0;
1414}
1415
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001416static inline bool be_in_all_promisc(struct be_adapter *adapter)
1417{
1418 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1419 BE_IF_FLAGS_ALL_PROMISCUOUS;
1420}
1421
1422static int be_set_vlan_promisc(struct be_adapter *adapter)
1423{
1424 struct device *dev = &adapter->pdev->dev;
1425 int status;
1426
1427 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1428 return 0;
1429
1430 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1431 if (!status) {
1432 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1433 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1434 } else {
1435 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1436 }
1437 return status;
1438}
1439
1440static int be_clear_vlan_promisc(struct be_adapter *adapter)
1441{
1442 struct device *dev = &adapter->pdev->dev;
1443 int status;
1444
1445 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1446 if (!status) {
1447 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1448 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1449 }
1450 return status;
1451}
1452
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Called with rx_filter_lock held by the vlan add/remove and
 * set-rx-mode paths.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	/* More VIDs than the HW filter table holds: use vlan-promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Coming out of vlan-promisc: clear it before programming filters */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1491
Patrick McHardy80d5c362013-04-19 02:04:28 +00001492static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
1494 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001495 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496
Sathya Perlab7172412016-07-27 05:26:18 -04001497 mutex_lock(&adapter->rx_filter_lock);
1498
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001499 /* Packets with VID 0 are always received by Lancer by default */
1500 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001501 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301502
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301503 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001504 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001505
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301506 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301507 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001508
Sathya Perlab7172412016-07-27 05:26:18 -04001509 status = be_vid_config(adapter);
1510done:
1511 mutex_unlock(&adapter->rx_filter_lock);
1512 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513}
1514
Patrick McHardy80d5c362013-04-19 02:04:28 +00001515static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516{
1517 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001518 int status = 0;
1519
1520 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001522 /* Packets with VID 0 are always received by Lancer by default */
1523 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001524 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001525
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301526 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001527 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301528
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301529 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301530 adapter->vlans_added--;
1531
Sathya Perlab7172412016-07-27 05:26:18 -04001532 status = be_vid_config(adapter);
1533done:
1534 mutex_unlock(&adapter->rx_filter_lock);
1535 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536}
1537
/* Enter full (unicast + multicast + vlan) promiscuous RX mode.
 * NOTE: the cached if_flags bits are set even if the FW command fails
 * (no status check here, unlike be_set_mc_promisc()).
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1543
1544static void be_set_mc_promisc(struct be_adapter *adapter)
1545{
1546 int status;
1547
1548 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1549 return;
1550
1551 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1552 if (!status)
1553 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1554}
1555
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001556static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001557{
1558 int status;
1559
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001560 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1561 return;
1562
1563 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001564 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001565 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1566}
1567
1568static void be_clear_uc_promisc(struct be_adapter *adapter)
1569{
1570 int status;
1571
1572 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1573 return;
1574
1575 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1576 if (!status)
1577 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1578}
1579
1580/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We really don't
1582 * add/remove addresses through this callback. But, we use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
1585static int be_uc_list_update(struct net_device *netdev,
1586 const unsigned char *addr)
1587{
1588 struct be_adapter *adapter = netdev_priv(netdev);
1589
1590 adapter->update_uc_list = true;
1591 return 0;
1592}
1593
1594static int be_mc_list_update(struct net_device *netdev,
1595 const unsigned char *addr)
1596{
1597 struct be_adapter *adapter = netdev_priv(netdev);
1598
1599 adapter->update_mc_list = true;
1600 return 0;
1601}
1602
/* Sync the netdev multicast list to the HW.  The mc-list is cached in
 * adapter->mc_list under the netdev addr lock and programmed to the FW
 * outside it.  Falls back to mcast-promisc when IFF_ALLMULTI is set or
 * the list exceeds what the HW supports.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	/* Program the FW from the cached state (may sleep) */
	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1653
1654static void be_clear_mc_list(struct be_adapter *adapter)
1655{
1656 struct net_device *netdev = adapter->netdev;
1657
1658 __dev_mc_unsync(netdev, NULL);
1659 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001660 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001661}
1662
Suresh Reddy988d44b2016-09-07 19:57:52 +05301663static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1664{
1665 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
Suresh Reddyc27ebf52016-09-07 19:57:53 +05301666 adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301667 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1668 return 0;
1669 }
1670
1671 return be_cmd_pmac_add(adapter,
1672 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1673 adapter->if_handle,
1674 &adapter->pmac_id[uc_idx + 1], 0);
1675}
1676
1677static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1678{
1679 if (pmac_id == adapter->pmac_id[0])
1680 return;
1681
1682 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1683}
1684
/* Sync the netdev unicast list to the HW MAC table.  The list is
 * cached in adapter->uc_list under the netdev addr lock and programmed
 * to the FW outside it (FW cmds may sleep).  Falls back to uc-promisc
 * when the list exceeds the HW limit (one slot is reserved for the
 * primary MAC).
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		i = 1; /* First slot is claimed by the Primary MAC */

		/* cache the uc-list in adapter array */
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		/* Delete the old secondary MACs, then program the new list */
		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}
1733
1734static void be_clear_uc_list(struct be_adapter *adapter)
1735{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001737 int i;
1738
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001739 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001740 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301741 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1742
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001743 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301744}
1745
/* Program RX filtering (promisc mode, uc-list, mc-list, vlans) to
 * match the current netdev flags and address lists.  Serialized by
 * rx_filter_lock against the vlan add/remove paths.
 */
static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}
1768
1769static void be_work_set_rx_mode(struct work_struct *work)
1770{
1771 struct be_cmd_work *cmd_work =
1772 container_of(work, struct be_cmd_work, work);
1773
1774 __be_set_rx_mode(cmd_work->adapter);
1775 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776}
1777
/* ndo_set_vf_mac: program a new MAC address for VF 'vf'.
 * On BEx chips the old pmac entry is deleted and a new one added; on
 * later chips the MAC is set directly with be_cmd_set_mac().
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Keep the cached VF MAC in sync with what was programmed */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1817
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001818static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301819 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001820{
1821 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001822 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001823
Sathya Perla11ac75e2011-12-13 00:58:50 +00001824 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001825 return -EPERM;
1826
Sathya Perla11ac75e2011-12-13 00:58:50 +00001827 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001828 return -EINVAL;
1829
1830 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001831 vi->max_tx_rate = vf_cfg->tx_rate;
1832 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001833 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1834 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001835 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301836 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001837 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001838
1839 return 0;
1840}
1841
/* Enable Transparent VLAN Tagging (TVT) with tag 'vlan' on VF 'vf':
 * the HW tags all of the VF's traffic.  Any VLAN filters the guest had
 * programmed are cleared, and the VF loses the FILTMGMT privilege so
 * it cannot program new filters while TVT is active.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1870
/* Disable Transparent VLAN Tagging on VF 'vf' and restore the VF's
 * ability to program its own VLAN filters (FILTMGMT privilege).
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1897
Sathya Perla748b5392014-05-09 13:29:13 +05301898static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001899{
1900 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001901 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001902 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001903
Sathya Perla11ac75e2011-12-13 00:58:50 +00001904 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001905 return -EPERM;
1906
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001907 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001908 return -EINVAL;
1909
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001910 if (vlan || qos) {
1911 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001912 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001913 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001914 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001915 }
1916
Kalesh APabccf232014-07-17 16:20:24 +05301917 if (status) {
1918 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001919 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1920 status);
Kalesh APabccf232014-07-17 16:20:24 +05301921 return be_cmd_status(status);
1922 }
1923
1924 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301925 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001926}
1927
/* ndo_set_vf_rate: set the maximum TX rate for VF 'vf' in Mbps.
 * min_tx_rate is not supported.  max_tx_rate == 0 removes the limit;
 * otherwise the rate must lie between 100 Mbps and the current link
 * speed, and on Skyhawk it must additionally be a multiple of 1% of
 * the link speed (QOS is programmed as a percentage).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0: skip validation and just clear the QOS setting */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301989
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301990static int be_set_vf_link_state(struct net_device *netdev, int vf,
1991 int link_state)
1992{
1993 struct be_adapter *adapter = netdev_priv(netdev);
1994 int status;
1995
1996 if (!sriov_enabled(adapter))
1997 return -EPERM;
1998
1999 if (vf >= adapter->num_vfs)
2000 return -EINVAL;
2001
2002 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302003 if (status) {
2004 dev_err(&adapter->pdev->dev,
2005 "Link state change on VF %d failed: %#x\n", vf, status);
2006 return be_cmd_status(status);
2007 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302008
Kalesh APabccf232014-07-17 16:20:24 +05302009 adapter->vf_cfg[vf].plink_tracking = link_state;
2010
2011 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302012}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002013
Kalesh APe7bcbd72015-05-06 05:30:32 -04002014static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2015{
2016 struct be_adapter *adapter = netdev_priv(netdev);
2017 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2018 u8 spoofchk;
2019 int status;
2020
2021 if (!sriov_enabled(adapter))
2022 return -EPERM;
2023
2024 if (vf >= adapter->num_vfs)
2025 return -EINVAL;
2026
2027 if (BEx_chip(adapter))
2028 return -EOPNOTSUPP;
2029
2030 if (enable == vf_cfg->spoofchk)
2031 return 0;
2032
2033 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2034
2035 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2036 0, spoofchk);
2037 if (status) {
2038 dev_err(&adapter->pdev->dev,
2039 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2040 return be_cmd_status(status);
2041 }
2042
2043 vf_cfg->spoofchk = enable;
2044 return 0;
2045}
2046
Sathya Perla2632baf2013-10-01 16:00:00 +05302047static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2048 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049{
Sathya Perla2632baf2013-10-01 16:00:00 +05302050 aic->rx_pkts_prev = rx_pkts;
2051 aic->tx_reqs_prev = tx_pkts;
2052 aic->jiffies = now;
2053}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002054
/* Compute a new event-queue delay (interrupt coalescing value) for this EQ
 * based on the aggregate rx+tx packet rate observed since the last snapshot.
 * Returns the static et_eqd when adaptive coalescing is disabled, the
 * previous eqd when the rate cannot be computed (first call, wrap-around,
 * or <1ms elapsed), otherwise a value clamped to [min_eqd, max_eqd].
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive mode off: reset history and use the fixed value */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum rx packet counts of all RX queues on this EQ; the
	 * u64_stats retry loop gives a consistent 64-bit snapshot on 32-bit
	 * hosts
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	/* Likewise for tx request counts of all TX queues on this EQ */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets-per-second across rx and tx since the last snapshot */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	/* Scale rate into an eqd; exact tuning constants are hardware
	 * heuristics inherited from the vendor
	 */
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2115
2116/* For Skyhawk-R only */
2117static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2118{
2119 struct be_adapter *adapter = eqo->adapter;
2120 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2121 ulong now = jiffies;
2122 int eqd;
2123 u32 mult_enc;
2124
2125 if (!aic->enable)
2126 return 0;
2127
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302128 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002129 eqd = aic->prev_eqd;
2130 else
2131 eqd = be_get_new_eqd(eqo);
2132
2133 if (eqd > 100)
2134 mult_enc = R2I_DLY_ENC_1;
2135 else if (eqd > 60)
2136 mult_enc = R2I_DLY_ENC_2;
2137 else if (eqd > 20)
2138 mult_enc = R2I_DLY_ENC_3;
2139 else
2140 mult_enc = R2I_DLY_ENC_0;
2141
2142 aic->prev_eqd = eqd;
2143
2144 return mult_enc;
2145}
2146
/* Recompute the delay for every event queue and push all changed values to
 * the FW in a single modify-eqd command. When @force_update is set, every
 * EQ's delay is programmed even if it did not change.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* 65/100 scaling converts eqd to the FW's
			 * delay-multiplier units
			 */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all updates into one FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
2168
/* Account one received frame in the per-rx-queue stats. The begin/end pair
 * brackets the writes for the u64_stats seqcount so 64-bit readers on
 * 32-bit hosts see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
2186
Sathya Perla2e588f82011-03-11 02:49:26 +00002187static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002188{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002189 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302190 * Also ignore ipcksm for ipv6 pkts
2191 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002192 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302193 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002194}
2195
/* Pop the next posted rx fragment from the tail of the RXQ and make its
 * data CPU-visible. Since several fragments share one DMA-mapped "big
 * page", only the last fragment of a page unmaps the whole mapping; the
 * others just sync their own fragment-sized region.
 * Caller owns the page reference in the returned entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last user of this big page: tear down the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still in use by later frags: sync only this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2221
2222/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223static void be_rx_compl_discard(struct be_rx_obj *rxo,
2224 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002227 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002229 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302230 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002231 put_page(page_info->page);
2232 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233 }
2234}
2235
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb head (tiny
 * packets) or split: the Ethernet header is copied and the payload is
 * attached as a page fragment. Remaining fragments are attached as page
 * frags, coalescing consecutive frags that live on the same physical page
 * into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the header; hand the payload over as a frag,
		 * keeping the page reference with the skb
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet must have exactly one rcvd frag */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2310
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted rx fragments, set checksum,
 * hash and vlan metadata, and hand it to the stack. On skb allocation
 * failure the completion's fragments are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when the device offload is
	 * enabled and the completion says the csum is usable
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 for tunneled (inner-csum-verified) packets */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2346
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all rx fragments of this completion to a napi-provided skb
 * (coalescing same-page frags into one slot), set metadata, and pass it
 * to napi_gro_frags(). Falls back to discarding the completion when no
 * skb is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration advances it to frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 for tunneled (inner-csum-verified) packets */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2403
/* Decode a v1 (BE3-native) Rx completion descriptor into the driver's
 * software rx-completion structure.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002426
/* Decode a v0 (legacy) Rx completion descriptor into the driver's software
 * rx-completion structure. Unlike v1, v0 carries an ip_frag bit and no
 * tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2448
/* Fetch and parse the next valid Rx completion from the rx CQ, or return
 * NULL when none is pending. The parsed completion is returned via the
 * per-queue rxcp scratch structure and the HW descriptor's valid bit is
 * cleared so it is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the host has the
		 * vlan configured too
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2493
Eric Dumazet1829b082011-03-01 05:48:12 +00002494static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002495{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002497
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002498 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002499 gfp |= __GFP_COMP;
2500 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002501}
2502
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Posts up to @frags_needed fragments, stopping early on allocation or
 * DMA-mapping failure, or when the next RXQ slot is still occupied. The
 * HW is notified of the posted buffers in MAX_NUM_POST_ERX_DB-sized
 * doorbell batches.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA once;
			 * subsequent frags reuse the same mapping
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in bounded batches */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2585
/* Fetch and parse the next valid Tx completion from the tx CQ, or return
 * NULL when none is pending. The descriptor's valid bit is cleared after
 * parsing so it is consumed exactly once.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this completion is not parsed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2606
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * every WRB's DMA buffer and freeing each completed skb. A non-NULL entry
 * in sent_skb_list marks the header WRB of a new request. Returns the
 * number of WRBs reclaimed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data WRB of a request also covers the linear
		 * (header) part of the skb, if any
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2641
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002642/* Return the number of events in the event queue */
2643static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002644{
2645 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002647
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002648 do {
2649 eqe = queue_tail_node(&eqo->q);
2650 if (eqe->evt == 0)
2651 break;
2652
2653 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002654 eqe->evt = 0;
2655 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002656 queue_tail_inc(&eqo->q);
2657 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002658
2659 return num;
2660}
2661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002662/* Leaves the EQ is disarmed state */
2663static void be_eq_clean(struct be_eq_obj *eqo)
2664{
2665 int num = events_get(eqo);
2666
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002667 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002668}
2669
Kalesh AP99b44302015-08-05 03:27:49 -04002670/* Free posted rx buffers that were not used */
2671static void be_rxq_clean(struct be_rx_obj *rxo)
2672{
2673 struct be_queue_info *rxq = &rxo->q;
2674 struct be_rx_page_info *page_info;
2675
2676 while (atomic_read(&rxq->used) > 0) {
2677 page_info = get_rx_page_info(rxo);
2678 put_page(page_info->page);
2679 memset(page_info, 0, sizeof(*page_info));
2680 }
2681 BUG_ON(atomic_read(&rxq->used));
2682 rxq->tail = 0;
2683 rxq->head = 0;
2684}
2685
/* Drain the RX CQ during queue teardown.
 *
 * Consume pending rx completions.
 * Wait for the flush completion (identified by zero num_rcvd)
 * to arrive. Notify CQ even when there are no more CQ entries
 * for HW to flush partially coalesced CQ entries.
 * In Lancer, there is no need to wait for flush compl.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			/* Lancer posts no flush compl; nothing to wait for */
			if (lancer_chip(adapter))
				break;

			/* give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* re-arm the CQ so HW can post the flush compl */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd identifies the flush compl */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2725
/* Drain all TX completion queues during interface teardown.
 *
 * Phase 1: poll every TXQ's CQ for completions, processing and acking
 * them, until either all TXQs are idle, the HW has been silent for
 * ~10ms, or a HW error is detected.
 *
 * Phase 2: any WRBs still enqueued at that point were never completed
 * (and possibly never notified to HW); reclaim them with the normal
 * tx-compl logic and rewind the TXQ head/tail to the last index the HW
 * was actually notified of.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2792
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002793static void be_evt_queues_destroy(struct be_adapter *adapter)
2794{
2795 struct be_eq_obj *eqo;
2796 int i;
2797
2798 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002799 if (eqo->q.created) {
2800 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002801 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302802 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302803 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002804 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002805 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002806 be_queue_free(adapter, &eqo->q);
2807 }
2808}
2809
2810static int be_evt_queues_create(struct be_adapter *adapter)
2811{
2812 struct be_queue_info *eq;
2813 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302814 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002815 int i, rc;
2816
Sathya Perlae2617682016-06-22 08:54:54 -04002817 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302818 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002819 max(adapter->cfg_num_rx_irqs,
2820 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002821
2822 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302823 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002824
Sathya Perla2632baf2013-10-01 16:00:00 +05302825 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002826 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002827 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302828 aic->max_eqd = BE_MAX_EQD;
2829 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002830
2831 eq = &eqo->q;
2832 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302833 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002834 if (rc)
2835 return rc;
2836
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302837 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002838 if (rc)
2839 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002840
2841 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2842 return -ENOMEM;
2843 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2844 eqo->affinity_mask);
2845 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2846 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002847 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002848 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002849}
2850
Sathya Perla5fb379e2009-06-18 00:02:59 +00002851static void be_mcc_queues_destroy(struct be_adapter *adapter)
2852{
2853 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002854
Sathya Perla8788fdc2009-07-27 22:52:03 +00002855 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002856 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002858 be_queue_free(adapter, q);
2859
Sathya Perla8788fdc2009-07-27 22:52:03 +00002860 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002861 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002863 be_queue_free(adapter, q);
2864}
2865
2866/* Must be called only after TX qs are created as MCC shares TX EQ */
2867static int be_mcc_queues_create(struct be_adapter *adapter)
2868{
2869 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002870
Sathya Perla8788fdc2009-07-27 22:52:03 +00002871 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002872 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302873 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874 goto err;
2875
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002876 /* Use the default EQ for MCC completions */
2877 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002878 goto mcc_cq_free;
2879
Sathya Perla8788fdc2009-07-27 22:52:03 +00002880 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002881 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2882 goto mcc_cq_destroy;
2883
Sathya Perla8788fdc2009-07-27 22:52:03 +00002884 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002885 goto mcc_q_free;
2886
2887 return 0;
2888
2889mcc_q_free:
2890 be_queue_free(adapter, q);
2891mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002892 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002893mcc_cq_free:
2894 be_queue_free(adapter, cq);
2895err:
2896 return -1;
2897}
2898
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899static void be_tx_queues_destroy(struct be_adapter *adapter)
2900{
2901 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002902 struct be_tx_obj *txo;
2903 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002904
Sathya Perla3c8def92011-06-12 20:01:58 +00002905 for_all_tx_queues(adapter, txo, i) {
2906 q = &txo->q;
2907 if (q->created)
2908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2909 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002910
Sathya Perla3c8def92011-06-12 20:01:58 +00002911 q = &txo->cq;
2912 if (q->created)
2913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2914 be_queue_free(adapter, q);
2915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002916}
2917
Sathya Perla77071332013-08-27 16:57:34 +05302918static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002919{
Sathya Perla73f394e2015-03-26 03:05:09 -04002920 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002921 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002922 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302923 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002924
Sathya Perlae2617682016-06-22 08:54:54 -04002925 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
Sathya Perladafc0fe2011-10-24 02:45:02 +00002926
Sathya Perla3c8def92011-06-12 20:01:58 +00002927 for_all_tx_queues(adapter, txo, i) {
2928 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002929 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2930 sizeof(struct be_eth_tx_compl));
2931 if (status)
2932 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002933
John Stultz827da442013-10-07 15:51:58 -07002934 u64_stats_init(&txo->stats.sync);
2935 u64_stats_init(&txo->stats.sync_compl);
2936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002937 /* If num_evt_qs is less than num_tx_qs, then more than
2938 * one txq share an eq
2939 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002940 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2941 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 if (status)
2943 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002944
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002945 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2946 sizeof(struct be_eth_wrb));
2947 if (status)
2948 return status;
2949
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002950 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002951 if (status)
2952 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002953
2954 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2955 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002956 }
2957
Sathya Perlad3791422012-09-28 04:39:44 +00002958 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2959 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002960 return 0;
2961}
2962
2963static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002964{
2965 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002966 struct be_rx_obj *rxo;
2967 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002968
Sathya Perla3abcded2010-10-03 22:12:27 -07002969 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002970 q = &rxo->cq;
2971 if (q->created)
2972 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2973 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002974 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002975}
2976
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002977static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002978{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002979 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002980 struct be_rx_obj *rxo;
2981 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002982
Sathya Perlae2617682016-06-22 08:54:54 -04002983 adapter->num_rss_qs =
2984 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
Sathya Perla92bf14a2013-08-27 16:57:32 +05302985
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002986 /* We'll use RSS only if atleast 2 RSS rings are supported. */
Sathya Perlae2617682016-06-22 08:54:54 -04002987 if (adapter->num_rss_qs < 2)
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002988 adapter->num_rss_qs = 0;
2989
2990 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2991
2992 /* When the interface is not capable of RSS rings (and there is no
2993 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002994 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002995 if (adapter->num_rx_qs == 0)
2996 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302997
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002998 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002999 for_all_rx_queues(adapter, rxo, i) {
3000 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07003001 cq = &rxo->cq;
3002 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05303003 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07003004 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003005 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003006
John Stultz827da442013-10-07 15:51:58 -07003007 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003008 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3009 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07003010 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003011 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07003012 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003013
Sathya Perlad3791422012-09-28 04:39:44 +00003014 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003015 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003016 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00003017}
3018
/* Legacy INTx interrupt handler: count and clear the EQ's events and hand
 * processing off to NAPI, tolerating the spurious interrupts that some
 * chips raise while the EQ is unarmed.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* ack the consumed events without re-arming the EQ */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3050
/* MSI-x interrupt handler: one vector per EQ. Clear the interrupt and
 * defer all event processing to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	/* notify with arm=false so the EQ stays unarmed until be_poll()
	 * completes (matches the convention used in be_intx/be_poll)
	 */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
3059
Sathya Perla2e588f82011-03-11 02:49:26 +00003060static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061{
Somnath Koture38b1702013-05-29 22:55:56 +00003062 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003063}
3064
/* Process up to @budget RX completions on @rxo, delivering good frames
 * via GRO or the regular path and discarding flush/partial/mis-filtered
 * completions. Acks consumed CQ entries and replenishes RX buffers when
 * the queue runs low. @polling distinguishes NAPI from busy-poll context.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* stats are updated even for discarded/flush completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3124
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303125static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303126{
3127 switch (status) {
3128 case BE_TX_COMP_HDR_PARSE_ERR:
3129 tx_stats(txo)->tx_hdr_parse_err++;
3130 break;
3131 case BE_TX_COMP_NDMA_ERR:
3132 tx_stats(txo)->tx_dma_err++;
3133 break;
3134 case BE_TX_COMP_ACL_ERR:
3135 tx_stats(txo)->tx_spoof_check_err++;
3136 break;
3137 }
3138}
3139
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303140static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303141{
3142 switch (status) {
3143 case LANCER_TX_COMP_LSO_ERR:
3144 tx_stats(txo)->tx_tso_err++;
3145 break;
3146 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3147 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3148 tx_stats(txo)->tx_spoof_check_err++;
3149 break;
3150 case LANCER_TX_COMP_QINQ_ERR:
3151 tx_stats(txo)->tx_qinq_err++;
3152 break;
3153 case LANCER_TX_COMP_PARITY_ERR:
3154 tx_stats(txo)->tx_internal_parity_err++;
3155 break;
3156 case LANCER_TX_COMP_DMA_ERR:
3157 tx_stats(txo)->tx_dma_err++;
3158 break;
3159 }
3160}
3161
/* Reap all available TX completions on @txo (queue index @idx): reclaim
 * the completed WRBs/skbs, account per-chip error stats, ack the CQ and
 * wake the netdev subqueue if it was stopped for lack of WRBs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* error stats are chip-family specific */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003196
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Per-EQ lock arbitrating between NAPI and busy-poll contexts.
 * eqo->state tracks the current owner (BE_EQ_NAPI / BE_EQ_POLL /
 * BE_EQ_IDLE) plus *_YIELD bits recording that the other context
 * attempted entry while the EQ was owned.
 */

/* Try to take the EQ for NAPI processing; returns false (recording a
 * NAPI yield) if busy-poll currently owns it. Caller has BH disabled.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for busy-poll processing; returns false (recording
 * a poll yield) if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll is locked out of this EQ (used at teardown) */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support NAPI always gets the EQ... */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

/* ...and busy-poll never does */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3296
/* NAPI poll handler for one EQ: reaps TX completions on every TXQ bound
 * to this EQ, processes RX up to @budget per RXQ (unless busy-poll owns
 * the EQ), services MCC completions on the MCC EQ, and finally either
 * completes NAPI and re-arms the EQ or leaves it unarmed to keep polling.
 * Returns the amount of RX work done (== budget to stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	/* consume the posted events now; they are acked to HW below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ; claim the full budget so NAPI
		 * re-polls instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* re-arm the EQ and ack the consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3345
Sathya Perla6384a4d2013-10-25 10:40:16 +05303346#ifdef CONFIG_NET_RX_BUSY_POLL
3347static int be_busy_poll(struct napi_struct *napi)
3348{
3349 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3350 struct be_adapter *adapter = eqo->adapter;
3351 struct be_rx_obj *rxo;
3352 int i, work = 0;
3353
3354 if (!be_lock_busy_poll(eqo))
3355 return LL_FLUSH_BUSY;
3356
3357 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3358 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3359 if (work)
3360 break;
3361 }
3362
3363 be_unlock_busy_poll(eqo);
3364 return work;
3365}
3366#endif
3367
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003368void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003369{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003370 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3371 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003372 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303373 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003374
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303375 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003376 return;
3377
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003378 if (lancer_chip(adapter)) {
3379 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3380 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303381 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003382 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303383 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003384 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303385 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303386 /* Do not log error messages if its a FW reset */
3387 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3388 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3389 dev_info(dev, "Firmware update in progress\n");
3390 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303391 dev_err(dev, "Error detected in the card\n");
3392 dev_err(dev, "ERR: sliport status 0x%x\n",
3393 sliport_status);
3394 dev_err(dev, "ERR: sliport error1 0x%x\n",
3395 sliport_err1);
3396 dev_err(dev, "ERR: sliport error2 0x%x\n",
3397 sliport_err2);
3398 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003399 }
3400 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003401 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3402 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3403 ue_lo_mask = ioread32(adapter->pcicfg +
3404 PCICFG_UE_STATUS_LOW_MASK);
3405 ue_hi_mask = ioread32(adapter->pcicfg +
3406 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003407
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003408 ue_lo = (ue_lo & ~ue_lo_mask);
3409 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003410
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303411 /* On certain platforms BE hardware can indicate spurious UEs.
3412 * Allow HW to stop working completely in case of a real UE.
3413 * Hence not setting the hw_error for UE detection.
3414 */
3415
3416 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303417 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303418 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303419 be_set_error(adapter, BE_ERROR_UE);
3420
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303421 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3422 if (ue_lo & 1)
3423 dev_err(dev, "UE: %s bit set\n",
3424 ue_status_low_desc[i]);
3425 }
3426 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3427 if (ue_hi & 1)
3428 dev_err(dev, "UE: %s bit set\n",
3429 ue_status_hi_desc[i]);
3430 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303431 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003432 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003433}
3434
Sathya Perla8d56ff12009-11-22 22:02:26 +00003435static void be_msix_disable(struct be_adapter *adapter)
3436{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003437 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003438 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003439 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303440 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003441 }
3442}
3443
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003444static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003445{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003446 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003447 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003448 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449
Sathya Perlace7faf02016-06-22 08:54:53 -04003450 /* If RoCE is supported, program the max number of vectors that
3451 * could be used for NIC and RoCE, else, just program the number
3452 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303453 */
Sathya Perlae2617682016-06-22 08:54:54 -04003454 if (be_roce_supported(adapter)) {
3455 max_roce_eqs =
3456 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3457 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3458 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3459 } else {
3460 num_vec = max(adapter->cfg_num_rx_irqs,
3461 adapter->cfg_num_tx_irqs);
3462 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003463
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003464 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003465 adapter->msix_entries[i].entry = i;
3466
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003467 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3468 MIN_MSIX_VECTORS, num_vec);
3469 if (num_vec < 0)
3470 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003471
Sathya Perla92bf14a2013-08-27 16:57:32 +05303472 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3473 adapter->num_msix_roce_vec = num_vec / 2;
3474 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3475 adapter->num_msix_roce_vec);
3476 }
3477
3478 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3479
3480 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3481 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003482 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003483
3484fail:
3485 dev_warn(dev, "MSIx enable failed\n");
3486
3487 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003488 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003489 return num_vec;
3490 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003491}
3492
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003493static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303494 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003495{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303496 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003497}
3498
3499static int be_msix_register(struct be_adapter *adapter)
3500{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003501 struct net_device *netdev = adapter->netdev;
3502 struct be_eq_obj *eqo;
3503 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003504
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003505 for_all_evt_queues(adapter, eqo, i) {
3506 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3507 vec = be_msix_vec_get(adapter, eqo);
3508 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003509 if (status)
3510 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003511
3512 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003513 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003514
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003515 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003516err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303517 for (i--; i >= 0; i--) {
3518 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003519 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303520 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003521 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303522 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003523 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003524 return status;
3525}
3526
3527static int be_irq_register(struct be_adapter *adapter)
3528{
3529 struct net_device *netdev = adapter->netdev;
3530 int status;
3531
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003532 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003533 status = be_msix_register(adapter);
3534 if (status == 0)
3535 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003536 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003537 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003538 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003539 }
3540
Sathya Perlae49cc342012-11-27 19:50:02 +00003541 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003542 netdev->irq = adapter->pdev->irq;
3543 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003544 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003545 if (status) {
3546 dev_err(&adapter->pdev->dev,
3547 "INTx request IRQ failed - err %d\n", status);
3548 return status;
3549 }
3550done:
3551 adapter->isr_registered = true;
3552 return 0;
3553}
3554
3555static void be_irq_unregister(struct be_adapter *adapter)
3556{
3557 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003558 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003559 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003560
3561 if (!adapter->isr_registered)
3562 return;
3563
3564 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003565 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003566 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003567 goto done;
3568 }
3569
3570 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003571 for_all_evt_queues(adapter, eqo, i) {
3572 vec = be_msix_vec_get(adapter, eqo);
3573 irq_set_affinity_hint(vec, NULL);
3574 free_irq(vec, eqo);
3575 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003576
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003577done:
3578 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003579}
3580
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003581static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003582{
Ajit Khaparde62219062016-02-10 22:45:53 +05303583 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003584 struct be_queue_info *q;
3585 struct be_rx_obj *rxo;
3586 int i;
3587
3588 for_all_rx_queues(adapter, rxo, i) {
3589 q = &rxo->q;
3590 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003591 /* If RXQs are destroyed while in an "out of buffer"
3592 * state, there is a possibility of an HW stall on
3593 * Lancer. So, post 64 buffers to each queue to relieve
3594 * the "out of buffer" condition.
3595 * Make sure there's space in the RXQ before posting.
3596 */
3597 if (lancer_chip(adapter)) {
3598 be_rx_cq_clean(rxo);
3599 if (atomic_read(&q->used) == 0)
3600 be_post_rx_frags(rxo, GFP_KERNEL,
3601 MAX_RX_POST);
3602 }
3603
Sathya Perla482c9e72011-06-29 23:33:17 +00003604 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003605 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003606 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003607 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003608 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003609 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303610
3611 if (rss->rss_flags) {
3612 rss->rss_flags = RSS_ENABLE_NONE;
3613 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3614 128, rss->rss_hkey);
3615 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003616}
3617
Kalesh APbcc84142015-08-05 03:27:48 -04003618static void be_disable_if_filters(struct be_adapter *adapter)
3619{
Suresh Reddy988d44b2016-09-07 19:57:52 +05303620 be_dev_mac_del(adapter, adapter->pmac_id[0]);
Kalesh APbcc84142015-08-05 03:27:48 -04003621 be_clear_uc_list(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04003622 be_clear_mc_list(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003623
3624 /* The IFACE flags are enabled in the open path and cleared
3625 * in the close path. When a VF gets detached from the host and
3626 * assigned to a VM the following happens:
3627 * - VF's IFACE flags get cleared in the detach path
3628 * - IFACE create is issued by the VF in the attach path
3629 * Due to a bug in the BE3/Skyhawk-R FW
3630 * (Lancer FW doesn't have the bug), the IFACE capability flags
3631 * specified along with the IFACE create cmd issued by a VF are not
3632 * honoured by FW. As a consequence, if a *new* driver
3633 * (that enables/disables IFACE flags in open/close)
3634 * is loaded in the host and an *old* driver is * used by a VM/VF,
3635 * the IFACE gets created *without* the needed flags.
3636 * To avoid this, disable RX-filter flags only for Lancer.
3637 */
3638 if (lancer_chip(adapter)) {
3639 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3640 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3641 }
3642}
3643
Sathya Perla889cd4b2010-05-30 23:33:45 +00003644static int be_close(struct net_device *netdev)
3645{
3646 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003647 struct be_eq_obj *eqo;
3648 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003649
Kalesh APe1ad8e32014-04-14 16:12:41 +05303650 /* This protection is needed as be_close() may be called even when the
3651 * adapter is in cleared state (after eeh perm failure)
3652 */
3653 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3654 return 0;
3655
Sathya Perlab7172412016-07-27 05:26:18 -04003656 /* Before attempting cleanup ensure all the pending cmds in the
3657 * config_wq have finished execution
3658 */
3659 flush_workqueue(be_wq);
3660
Kalesh APbcc84142015-08-05 03:27:48 -04003661 be_disable_if_filters(adapter);
3662
Ivan Veceradff345c52013-11-27 08:59:32 +01003663 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3664 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003665 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303666 be_disable_busy_poll(eqo);
3667 }
David S. Miller71237b62013-11-28 18:53:36 -05003668 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003669 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003670
3671 be_async_mcc_disable(adapter);
3672
3673 /* Wait for all pending tx completions to arrive so that
3674 * all tx skbs are freed.
3675 */
Sathya Perlafba87552013-05-08 02:05:50 +00003676 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303677 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003678
3679 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003680
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003681 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003682 if (msix_enabled(adapter))
3683 synchronize_irq(be_msix_vec_get(adapter, eqo));
3684 else
3685 synchronize_irq(netdev->irq);
3686 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003687 }
3688
Sathya Perla889cd4b2010-05-30 23:33:45 +00003689 be_irq_unregister(adapter);
3690
Sathya Perla482c9e72011-06-29 23:33:17 +00003691 return 0;
3692}
3693
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003694static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003695{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003696 struct rss_info *rss = &adapter->rss_info;
3697 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003698 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003699 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003700
3701 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003702 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3703 sizeof(struct be_eth_rx_d));
3704 if (rc)
3705 return rc;
3706 }
3707
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003708 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3709 rxo = default_rxo(adapter);
3710 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3711 rx_frag_size, adapter->if_handle,
3712 false, &rxo->rss_id);
3713 if (rc)
3714 return rc;
3715 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003716
3717 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003718 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003719 rx_frag_size, adapter->if_handle,
3720 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003721 if (rc)
3722 return rc;
3723 }
3724
3725 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003726 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003727 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303728 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003729 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303730 rss->rsstable[j + i] = rxo->rss_id;
3731 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003732 }
3733 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303734 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3735 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003736
3737 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303738 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3739 RSS_ENABLE_UDP_IPV6;
Ajit Khaparde62219062016-02-10 22:45:53 +05303740
3741 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3742 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3743 RSS_INDIR_TABLE_LEN, rss_key);
3744 if (rc) {
3745 rss->rss_flags = RSS_ENABLE_NONE;
3746 return rc;
3747 }
3748
3749 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303750 } else {
3751 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303752 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303753 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003754
Venkata Duvvurue2557872014-04-21 15:38:00 +05303755
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003756 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3757 * which is a queue empty condition
3758 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003759 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003760 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3761
Sathya Perla889cd4b2010-05-30 23:33:45 +00003762 return 0;
3763}
3764
Kalesh APbcc84142015-08-05 03:27:48 -04003765static int be_enable_if_filters(struct be_adapter *adapter)
3766{
3767 int status;
3768
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003769 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003770 if (status)
3771 return status;
3772
3773 /* For BE3 VFs, the PF programs the initial MAC address */
3774 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05303775 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003776 if (status)
3777 return status;
Suresh Reddyc27ebf52016-09-07 19:57:53 +05303778 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003779 }
3780
3781 if (adapter->vlans_added)
3782 be_vid_config(adapter);
3783
Sathya Perlab7172412016-07-27 05:26:18 -04003784 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003785
3786 return 0;
3787}
3788
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003789static int be_open(struct net_device *netdev)
3790{
3791 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003792 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003793 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003794 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003795 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003796 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003797
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003798 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003799 if (status)
3800 goto err;
3801
Kalesh APbcc84142015-08-05 03:27:48 -04003802 status = be_enable_if_filters(adapter);
3803 if (status)
3804 goto err;
3805
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003806 status = be_irq_register(adapter);
3807 if (status)
3808 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003809
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003810 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003811 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003812
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003813 for_all_tx_queues(adapter, txo, i)
3814 be_cq_notify(adapter, txo->cq.id, true, 0);
3815
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003816 be_async_mcc_enable(adapter);
3817
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003818 for_all_evt_queues(adapter, eqo, i) {
3819 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303820 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003821 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003822 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003823 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003824
Sathya Perla323ff712012-09-28 04:39:43 +00003825 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003826 if (!status)
3827 be_link_status_update(adapter, link_status);
3828
Sathya Perlafba87552013-05-08 02:05:50 +00003829 netif_tx_start_all_queues(netdev);
Sathya Perlac9c47142014-03-27 10:46:19 +05303830 if (skyhawk_chip(adapter))
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07003831 udp_tunnel_get_rx_info(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303832
Sathya Perla889cd4b2010-05-30 23:33:45 +00003833 return 0;
3834err:
3835 be_close(adapter->netdev);
3836 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003837}
3838
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003839static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3840{
3841 u32 addr;
3842
3843 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3844
3845 mac[5] = (u8)(addr & 0xFF);
3846 mac[4] = (u8)((addr >> 8) & 0xFF);
3847 mac[3] = (u8)((addr >> 16) & 0xFF);
3848 /* Use the OUI from the current MAC address */
3849 memcpy(mac, adapter->netdev->dev_addr, 3);
3850}
3851
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003852/*
3853 * Generate a seed MAC address from the PF MAC Address using jhash.
3854 * MAC Address for VFs are assigned incrementally starting from the seed.
3855 * These addresses are programmed in the ASIC by the PF and the VF driver
3856 * queries for the MAC address during its probe.
3857 */
Sathya Perla4c876612013-02-03 20:30:11 +00003858static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003859{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003860 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003861 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003862 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003863 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003864
3865 be_vf_eth_addr_generate(adapter, mac);
3866
Sathya Perla11ac75e2011-12-13 00:58:50 +00003867 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303868 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003869 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003870 vf_cfg->if_handle,
3871 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303872 else
3873 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3874 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003875
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003876 if (status)
3877 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303878 "Mac address assignment failed for VF %d\n",
3879 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003880 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003881 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003882
3883 mac[5] += 1;
3884 }
3885 return status;
3886}
3887
Sathya Perla4c876612013-02-03 20:30:11 +00003888static int be_vfs_mac_query(struct be_adapter *adapter)
3889{
3890 int status, vf;
3891 u8 mac[ETH_ALEN];
3892 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003893
3894 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303895 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3896 mac, vf_cfg->if_handle,
3897 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003898 if (status)
3899 return status;
3900 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3901 }
3902 return 0;
3903}
3904
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003905static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003906{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003907 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003908 u32 vf;
3909
Sathya Perla257a3fe2013-06-14 15:54:51 +05303910 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003911 dev_warn(&adapter->pdev->dev,
3912 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003913 goto done;
3914 }
3915
Sathya Perlab4c1df92013-05-08 02:05:47 +00003916 pci_disable_sriov(adapter->pdev);
3917
Sathya Perla11ac75e2011-12-13 00:58:50 +00003918 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303919 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003920 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3921 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303922 else
3923 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3924 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003925
Sathya Perla11ac75e2011-12-13 00:58:50 +00003926 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3927 }
Somnath Kotur884476b2016-06-22 08:54:55 -04003928
3929 if (BE3_chip(adapter))
3930 be_cmd_set_hsw_config(adapter, 0, 0,
3931 adapter->if_handle,
3932 PORT_FWD_TYPE_PASSTHRU, 0);
Sathya Perla39f1d942012-05-08 19:41:24 +00003933done:
3934 kfree(adapter->vf_cfg);
3935 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303936 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003937}
3938
Sathya Perla77071332013-08-27 16:57:34 +05303939static void be_clear_queues(struct be_adapter *adapter)
3940{
3941 be_mcc_queues_destroy(adapter);
3942 be_rx_cqs_destroy(adapter);
3943 be_tx_queues_destroy(adapter);
3944 be_evt_queues_destroy(adapter);
3945}
3946
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303947static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003948{
Sathya Perla191eb752012-02-23 18:50:13 +00003949 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3950 cancel_delayed_work_sync(&adapter->work);
3951 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3952 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303953}
3954
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003955static void be_cancel_err_detection(struct be_adapter *adapter)
3956{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303957 struct be_error_recovery *err_rec = &adapter->error_recovery;
3958
3959 if (!be_err_recovery_workq)
3960 return;
3961
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003962 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303963 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003964 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3965 }
3966}
3967
Sathya Perlac9c47142014-03-27 10:46:19 +05303968static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3969{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003970 struct net_device *netdev = adapter->netdev;
3971
Sathya Perlac9c47142014-03-27 10:46:19 +05303972 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3973 be_cmd_manage_iface(adapter, adapter->if_handle,
3974 OP_CONVERT_TUNNEL_TO_NORMAL);
3975
3976 if (adapter->vxlan_port)
3977 be_cmd_set_vxlan_port(adapter, 0);
3978
3979 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3980 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003981
3982 netdev->hw_enc_features = 0;
3983 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303984 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303985}
3986
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003987static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3988 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05003989{
3990 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003991 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3992 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05003993 u16 num_vf_qs = 1;
3994
Somnath Koturde2b1e02016-06-06 07:22:10 -04003995 /* Distribute the queue resources among the PF and it's VFs */
3996 if (num_vfs) {
3997 /* Divide the rx queues evenly among the VFs and the PF, capped
3998 * at VF-EQ-count. Any remainder queues belong to the PF.
3999 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05304000 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4001 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05004002
Somnath Koturde2b1e02016-06-06 07:22:10 -04004003 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4004 * RSS Tables per port. Provide RSS on VFs, only if number of
4005 * VFs requested is less than it's PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05004006 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004007 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05004008 num_vf_qs = 1;
4009 }
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004010
4011 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4012 * which are modifiable using SET_PROFILE_CONFIG cmd.
4013 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004014 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4015 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004016
4017 /* If RSS IFACE capability flags are modifiable for a VF, set the
4018 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4019 * more than 1 RSSQ is available for a VF.
4020 * Otherwise, provision only 1 queue pair for VF.
4021 */
4022 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4023 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4024 if (num_vf_qs > 1) {
4025 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4026 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4027 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4028 } else {
4029 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4030 BE_IF_FLAGS_DEFQ_RSS);
4031 }
4032 } else {
4033 num_vf_qs = 1;
4034 }
4035
4036 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4037 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4038 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4039 }
4040
4041 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4042 vft_res->max_rx_qs = num_vf_qs;
4043 vft_res->max_rss_qs = num_vf_qs;
4044 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4045 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4046
4047 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4048 * among the PF and it's VFs, if the fields are changeable
4049 */
4050 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4051 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4052
4053 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4054 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4055
4056 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4057 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4058
4059 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4060 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004061}
4062
/* Destroy the PF's iface in FW and release the filter bookkeeping
 * arrays (pmac_id, mc_list, uc_list) allocated by be_if_create().
 * Pointers are NULLed after kfree() so a repeat call is harmless.
 */
static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}
4076
/* Teardown counterpart of be_setup(): stop the worker, undo VF/SRIOV
 * provisioning, drop vxlan offloads, destroy the iface and all queues,
 * and finally release MSI-X.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	/* Ensure no queued work on the common workqueue still references us */
	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4112
/* Create an iface in FW for every VF (proxy if_create issued by the PF,
 * domain = vf + 1).  Returns 0 or the first failing cmd status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Query this VF's own limits from the active profile;
			 * on failure fall back to the default cap_flags above
			 */
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4148
Sathya Perla39f1d942012-05-08 19:41:24 +00004149static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004150{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004151 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004152 int vf;
4153
Sathya Perla39f1d942012-05-08 19:41:24 +00004154 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4155 GFP_KERNEL);
4156 if (!adapter->vf_cfg)
4157 return -ENOMEM;
4158
Sathya Perla11ac75e2011-12-13 00:58:50 +00004159 for_all_vfs(adapter, vf_cfg, vf) {
4160 vf_cfg->if_handle = -1;
4161 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004162 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004163 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004164}
4165
/* Bring up SRIOV: create (or re-discover) each VF's iface and MAC,
 * grant filter privileges, read spoofchk state, and enable the VFs.
 * old_vfs != 0 means VFs survived a previous driver unload, in which
 * case existing FW state is queried instead of re-created.
 * On any failure everything is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: query their if-ids and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh enable: create ifaces and program MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4258
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304259/* Converting function_mode bits on BE3 to SH mc_type enums */
4260
4261static u8 be_convert_mc_type(u32 function_mode)
4262{
Suresh Reddy66064db2014-06-23 16:41:29 +05304263 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304264 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304265 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304266 return FLEX10;
4267 else if (function_mode & VNIC_MODE)
4268 return vNIC2;
4269 else if (function_mode & UMC_ENABLED)
4270 return UMC;
4271 else
4272 return MC_NONE;
4273}
4274
/* On BE2/BE3 FW does not suggest the supported limits;
 * fill in @res with driver-hardcoded limits chosen from the chip type,
 * multi-channel mode, SRIOV state, and the function capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS only for a non-SRIOV, RSS-capable PF; one extra RXQ is the
	 * default (non-RSS) queue
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4346
Sathya Perla30128032011-11-10 19:17:57 +00004347static void be_setup_init(struct be_adapter *adapter)
4348{
4349 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004350 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004351 adapter->if_handle = -1;
4352 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004353 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304354 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004355 if (be_physfn(adapter))
4356 adapter->cmd_privileges = MAX_PRIVILEGES;
4357 else
4358 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004359}
4360
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Query port-wide NIC-PF count and total VFs from the saved profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Each NIC PF consumes one RSS table out of the port-wide budget */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4385
/* Read the PF-pool (SRIOV) resource limits from FW into
 * adapter->pool_res, handling old-BE3-FW and already-enabled-VF
 * quirks.  Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	/* On Skyhawk, derive the per-pool RSS-table budget up front */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4424
/* Query the SRIOV pool limits and, on Skyhawk with no pre-existing VFs,
 * ask FW to hand the whole PF-pool to the PF (num_vfs = 0) for now;
 * the pool is re-divided later when VFs are actually enabled.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4450
/* Populate adapter->res with per-function resource limits (hardcoded
 * on BE2/BE3, FW-reported otherwise) and derive the initial RX/TX IRQ
 * configuration.  Returns 0 or a FW cmd error status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4500
/* Query static controller/function configuration from FW: controller
 * attributes, FW config, FAT dump size, log level (BEx), WoL capability
 * and active profile.  Returns 0 or the first failing mandatory cmd
 * status; the later queries are best-effort.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		/* Map the FW log level onto the netdev msg_enable bitmap */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	/* Propagate the WoL setting to the PCI core's wake state */
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4538
Sathya Perla95046b92013-07-23 15:25:02 +05304539static int be_mac_setup(struct be_adapter *adapter)
4540{
4541 u8 mac[ETH_ALEN];
4542 int status;
4543
4544 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4545 status = be_cmd_get_perm_mac(adapter, mac);
4546 if (status)
4547 return status;
4548
4549 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4550 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304551 }
4552
Sathya Perla95046b92013-07-23 15:25:02 +05304553 return 0;
4554}
4555
/* Arm the periodic (1s) worker on the common be_wq workqueue and mark
 * it scheduled so be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4561
/* Flush and destroy the global error-recovery workqueue, if it was
 * ever created; safe to call multiple times (pointer is NULLed).
 */
static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}
4571
/* Queue the error-detection work to run after @delay msecs on the
 * dedicated recovery workqueue; no-op if that workqueue was never
 * created.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4583
/* Create all queue objects (EQs, TXQs, RX CQs, MCC queues) in order and
 * publish the real queue counts to the net stack.  Returns 0 or the
 * first failing status.
 * NOTE(review): on failure the partially-created queues are not freed
 * here; presumably the caller's error path runs be_clear_queues() —
 * confirm against the callers.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the stack how many RX/TX queues are actually usable */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4618
/* Allocate the MAC/multicast/unicast filter bookkeeping arrays and
 * create the PF's iface in FW.  Returns 0, -ENOMEM, or a FW status.
 * NOTE(review): on a mid-sequence -ENOMEM the earlier allocations are
 * not freed here; presumably the caller's error path reaches
 * be_if_destroy(), which frees and NULLs them — confirm.
 */
static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	/* With a single RX interrupt RSS buys nothing; drop those caps */
	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	if (status)
		return status;

	return 0;
}
4654
/* Tear down and re-create the iface and all queues (e.g. after a
 * channel/ring-count change): close if running, destroy queues and
 * iface, optionally re-program MSI-X, rebuild, then re-open.
 * Returns 0 or the first failing status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	/* The iface is destroyed and re-created to pick up new caps */
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4697
/* Parse the leading major number out of a "major.minor..." FW version
 * string.  Returns 0 if the string does not begin with an integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	/* sscanf reports how many conversions succeeded; we need the one */
	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4708
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304709/* If it is error recovery, FLR the PF
4710 * Else if any VFs are already enabled don't FLR the PF
4711 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004712static bool be_reset_required(struct be_adapter *adapter)
4713{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304714 if (be_error_recovering(adapter))
4715 return true;
4716 else
4717 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004718}
4719
4720/* Wait for the FW to be ready and perform the required initialization */
4721static int be_func_init(struct be_adapter *adapter)
4722{
4723 int status;
4724
4725 status = be_fw_wait_ready(adapter);
4726 if (status)
4727 return status;
4728
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304729 /* FW is now ready; clear errors to allow cmds/doorbell */
4730 be_clear_error(adapter, BE_CLEAR_ALL);
4731
Sathya Perlaf962f842015-02-23 04:20:16 -05004732 if (be_reset_required(adapter)) {
4733 status = be_cmd_reset_function(adapter);
4734 if (status)
4735 return status;
4736
4737 /* Wait for interrupts to quiesce after an FLR */
4738 msleep(100);
Sathya Perlaf962f842015-02-23 04:20:16 -05004739 }
4740
4741 /* Tell FW we're ready to fire cmds */
4742 status = be_cmd_fw_init(adapter);
4743 if (status)
4744 return status;
4745
4746 /* Allow interrupts for other ULPs running on NIC function */
4747 be_intr_set(adapter, true);
4748
4749 return 0;
4750}
4751
Sathya Perla5fb379e2009-06-18 00:02:59 +00004752static int be_setup(struct be_adapter *adapter)
4753{
Sathya Perla39f1d942012-05-08 19:41:24 +00004754 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004755 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004756
Sathya Perlaf962f842015-02-23 04:20:16 -05004757 status = be_func_init(adapter);
4758 if (status)
4759 return status;
4760
Sathya Perla30128032011-11-10 19:17:57 +00004761 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004762
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004763 if (!lancer_chip(adapter))
4764 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004765
Suresh Reddy980df242015-12-30 01:29:03 -05004766 /* invoke this cmd first to get pf_num and vf_num which are needed
4767 * for issuing profile related cmds
4768 */
4769 if (!BEx_chip(adapter)) {
4770 status = be_cmd_get_func_config(adapter, NULL);
4771 if (status)
4772 return status;
4773 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004774
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004775 status = be_get_config(adapter);
4776 if (status)
4777 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004778
Somnath Koturde2b1e02016-06-06 07:22:10 -04004779 if (!BE2_chip(adapter) && be_physfn(adapter))
4780 be_alloc_sriov_res(adapter);
4781
4782 status = be_get_resources(adapter);
4783 if (status)
4784 goto err;
4785
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004786 status = be_msix_enable(adapter);
4787 if (status)
4788 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004789
Kalesh APbcc84142015-08-05 03:27:48 -04004790 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304791 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004792 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004793 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004794
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304795 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4796 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304797 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304798 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004799 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004800 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004801
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004802 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004803
Sathya Perla95046b92013-07-23 15:25:02 +05304804 status = be_mac_setup(adapter);
4805 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004806 goto err;
4807
Kalesh APe97e3cd2014-07-17 16:20:26 +05304808 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304809 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004810
Somnath Koture9e2a902013-10-24 14:37:53 +05304811 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304812 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304813 adapter->fw_ver);
4814 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4815 }
4816
Kalesh AP00d594c2015-01-20 03:51:44 -05004817 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4818 adapter->rx_fc);
4819 if (status)
4820 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4821 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004822
Kalesh AP00d594c2015-01-20 03:51:44 -05004823 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4824 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004825
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304826 if (be_physfn(adapter))
4827 be_cmd_set_logical_link_config(adapter,
4828 IFLA_VF_LINK_STATE_AUTO, 0);
4829
Somnath Kotur884476b2016-06-22 08:54:55 -04004830 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4831 * confusing a linux bridge or OVS that it might be connected to.
4832 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4833 * when SRIOV is not enabled.
4834 */
4835 if (BE3_chip(adapter))
4836 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4837 PORT_FWD_TYPE_PASSTHRU, 0);
4838
Vasundhara Volambec84e62014-06-30 13:01:32 +05304839 if (adapter->num_vfs)
4840 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004841
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004842 status = be_cmd_get_phy_info(adapter);
4843 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004844 adapter->phy.fc_autoneg = 1;
4845
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304846 if (be_physfn(adapter) && !lancer_chip(adapter))
4847 be_cmd_set_features(adapter);
4848
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304849 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304850 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004851 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004852err:
4853 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004854 return status;
4855}
4856
Ivan Vecera66268732011-12-08 01:31:21 +00004857#ifdef CONFIG_NET_POLL_CONTROLLER
4858static void be_netpoll(struct net_device *netdev)
4859{
4860 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004861 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004862 int i;
4863
Sathya Perlae49cc342012-11-27 19:50:02 +00004864 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004865 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004866 napi_schedule(&eqo->napi);
4867 }
Ivan Vecera66268732011-12-08 01:31:21 +00004868}
4869#endif
4870
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004871int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4872{
4873 const struct firmware *fw;
4874 int status;
4875
4876 if (!netif_running(adapter->netdev)) {
4877 dev_err(&adapter->pdev->dev,
4878 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304879 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004880 }
4881
4882 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4883 if (status)
4884 goto fw_exit;
4885
4886 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4887
4888 if (lancer_chip(adapter))
4889 status = lancer_fw_download(adapter, fw);
4890 else
4891 status = be_fw_download(adapter, fw);
4892
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004893 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304894 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004895
Ajit Khaparde84517482009-09-04 03:12:16 +00004896fw_exit:
4897 release_firmware(fw);
4898 return status;
4899}
4900
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004901static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4902 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004903{
4904 struct be_adapter *adapter = netdev_priv(dev);
4905 struct nlattr *attr, *br_spec;
4906 int rem;
4907 int status = 0;
4908 u16 mode = 0;
4909
4910 if (!sriov_enabled(adapter))
4911 return -EOPNOTSUPP;
4912
4913 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004914 if (!br_spec)
4915 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004916
4917 nla_for_each_nested(attr, br_spec, rem) {
4918 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4919 continue;
4920
Thomas Grafb7c1a312014-11-26 13:42:17 +01004921 if (nla_len(attr) < sizeof(mode))
4922 return -EINVAL;
4923
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004924 mode = nla_get_u16(attr);
Suresh Reddyac0f5fb2015-12-30 01:28:57 -05004925 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4926 return -EOPNOTSUPP;
4927
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004928 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4929 return -EINVAL;
4930
4931 status = be_cmd_set_hsw_config(adapter, 0, 0,
4932 adapter->if_handle,
4933 mode == BRIDGE_MODE_VEPA ?
4934 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004935 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004936 if (status)
4937 goto err;
4938
4939 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4940 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4941
4942 return status;
4943 }
4944err:
4945 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4946 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4947
4948 return status;
4949}
4950
/* ndo_bridge_getlink: report the current e-switch mode (VEB/VEPA) to
 * userspace via the default bridge-getlink fill helper.
 * Returns 0 (nothing reported) when the mode is unavailable, the chip
 * has no SR-IOV capability, or the switch is in PASSTHRU mode.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: query the actual mode from FW */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4981
Sathya Perlab7172412016-07-27 05:26:18 -04004982static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4983 void (*func)(struct work_struct *))
4984{
4985 struct be_cmd_work *work;
4986
4987 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4988 if (!work) {
4989 dev_err(&adapter->pdev->dev,
4990 "be_work memory allocation failed\n");
4991 return NULL;
4992 }
4993
4994 INIT_WORK(&work->work, func);
4995 work->adapter = adapter;
4996 return work;
4997}
4998
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004999/* VxLAN offload Notes:
5000 *
5001 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5002 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5003 * is expected to work across all types of IP tunnels once exported. Skyhawk
5004 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305005 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5006 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5007 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005008 *
5009 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5010 * adds more than one port, disable offloads and don't re-enable them again
5011 * until after all the tunnels are removed.
5012 */
Sathya Perlab7172412016-07-27 05:26:18 -04005013static void be_work_add_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305014{
Sathya Perlab7172412016-07-27 05:26:18 -04005015 struct be_cmd_work *cmd_work =
5016 container_of(work, struct be_cmd_work, work);
5017 struct be_adapter *adapter = cmd_work->adapter;
5018 struct net_device *netdev = adapter->netdev;
Sathya Perlac9c47142014-03-27 10:46:19 +05305019 struct device *dev = &adapter->pdev->dev;
Sathya Perlab7172412016-07-27 05:26:18 -04005020 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305021 int status;
5022
Jiri Benc1e5b3112015-09-17 16:11:13 +02005023 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5024 adapter->vxlan_port_aliases++;
Sathya Perlab7172412016-07-27 05:26:18 -04005025 goto done;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005026 }
5027
Sathya Perlac9c47142014-03-27 10:46:19 +05305028 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05305029 dev_info(dev,
5030 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005031 dev_info(dev, "Disabling VxLAN offloads\n");
5032 adapter->vxlan_port_count++;
5033 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05305034 }
5035
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005036 if (adapter->vxlan_port_count++ >= 1)
Sathya Perlab7172412016-07-27 05:26:18 -04005037 goto done;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005038
Sathya Perlac9c47142014-03-27 10:46:19 +05305039 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5040 OP_CONVERT_NORMAL_TO_TUNNEL);
5041 if (status) {
5042 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5043 goto err;
5044 }
5045
5046 status = be_cmd_set_vxlan_port(adapter, port);
5047 if (status) {
5048 dev_warn(dev, "Failed to add VxLAN port\n");
5049 goto err;
5050 }
5051 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5052 adapter->vxlan_port = port;
5053
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005054 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5055 NETIF_F_TSO | NETIF_F_TSO6 |
5056 NETIF_F_GSO_UDP_TUNNEL;
5057 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05305058 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005059
Sathya Perlac9c47142014-03-27 10:46:19 +05305060 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5061 be16_to_cpu(port));
Sathya Perlab7172412016-07-27 05:26:18 -04005062 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305063err:
5064 be_disable_vxlan_offloads(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04005065done:
5066 kfree(cmd_work);
Sathya Perlac9c47142014-03-27 10:46:19 +05305067}
5068
Sathya Perlab7172412016-07-27 05:26:18 -04005069static void be_work_del_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305070{
Sathya Perlab7172412016-07-27 05:26:18 -04005071 struct be_cmd_work *cmd_work =
5072 container_of(work, struct be_cmd_work, work);
5073 struct be_adapter *adapter = cmd_work->adapter;
5074 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305075
5076 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005077 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305078
Jiri Benc1e5b3112015-09-17 16:11:13 +02005079 if (adapter->vxlan_port_aliases) {
5080 adapter->vxlan_port_aliases--;
Sathya Perlab7172412016-07-27 05:26:18 -04005081 goto out;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005082 }
5083
Sathya Perlac9c47142014-03-27 10:46:19 +05305084 be_disable_vxlan_offloads(adapter);
5085
5086 dev_info(&adapter->pdev->dev,
5087 "Disabled VxLAN offloads for UDP port %d\n",
5088 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005089done:
5090 adapter->vxlan_port_count--;
Sathya Perlab7172412016-07-27 05:26:18 -04005091out:
5092 kfree(cmd_work);
5093}
5094
5095static void be_cfg_vxlan_port(struct net_device *netdev,
5096 struct udp_tunnel_info *ti,
5097 void (*func)(struct work_struct *))
5098{
5099 struct be_adapter *adapter = netdev_priv(netdev);
5100 struct be_cmd_work *cmd_work;
5101
5102 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5103 return;
5104
5105 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5106 return;
5107
5108 cmd_work = be_alloc_work(adapter, func);
5109 if (cmd_work) {
5110 cmd_work->info.vxlan_port = ti->port;
5111 queue_work(be_wq, &cmd_work->work);
5112 }
5113}
5114
/* ndo_udp_tunnel_del: defer VxLAN port removal to the cmd workqueue */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5120
/* ndo_udp_tunnel_add: defer VxLAN port addition to the cmd workqueue */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005126
Jesse Gross5f352272014-12-23 22:37:26 -08005127static netdev_features_t be_features_check(struct sk_buff *skb,
5128 struct net_device *dev,
5129 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005130{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305131 struct be_adapter *adapter = netdev_priv(dev);
5132 u8 l4_hdr = 0;
5133
5134 /* The code below restricts offload features for some tunneled packets.
5135 * Offload features for normal (non tunnel) packets are unchanged.
5136 */
5137 if (!skb->encapsulation ||
5138 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5139 return features;
5140
5141 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5142 * should disable tunnel offload features if it's not a VxLAN packet,
5143 * as tunnel offloads have been enabled only for VxLAN. This is done to
5144 * allow other tunneled traffic like GRE work fine while VxLAN
5145 * offloads are configured in Skyhawk-R.
5146 */
5147 switch (vlan_get_protocol(skb)) {
5148 case htons(ETH_P_IP):
5149 l4_hdr = ip_hdr(skb)->protocol;
5150 break;
5151 case htons(ETH_P_IPV6):
5152 l4_hdr = ipv6_hdr(skb)->nexthdr;
5153 break;
5154 default:
5155 return features;
5156 }
5157
5158 if (l4_hdr != IPPROTO_UDP ||
5159 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5160 skb->inner_protocol != htons(ETH_P_TEB) ||
5161 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5162 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08005163 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305164
5165 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005166}
Sathya Perlac9c47142014-03-27 10:46:19 +05305167
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305168static int be_get_phys_port_id(struct net_device *dev,
5169 struct netdev_phys_item_id *ppid)
5170{
5171 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5172 struct be_adapter *adapter = netdev_priv(dev);
5173 u8 *id;
5174
5175 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5176 return -ENOSPC;
5177
5178 ppid->id[0] = adapter->hba_port_num + 1;
5179 id = &ppid->id[1];
5180 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5181 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5182 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5183
5184 ppid->id_len = id_len;
5185
5186 return 0;
5187}
5188
Sathya Perlab7172412016-07-27 05:26:18 -04005189static void be_set_rx_mode(struct net_device *dev)
5190{
5191 struct be_adapter *adapter = netdev_priv(dev);
5192 struct be_cmd_work *work;
5193
5194 work = be_alloc_work(adapter, be_work_set_rx_mode);
5195 if (work)
5196 queue_work(be_wq, &work->work);
5197}
5198
stephen hemmingere5686ad2012-01-05 19:10:25 +00005199static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005200 .ndo_open = be_open,
5201 .ndo_stop = be_close,
5202 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005203 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005204 .ndo_set_mac_address = be_mac_addr_set,
5205 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005206 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005207 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005208 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5209 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005210 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005211 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005212 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005213 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305214 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005215 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005216#ifdef CONFIG_NET_POLL_CONTROLLER
5217 .ndo_poll_controller = be_netpoll,
5218#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005219 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5220 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305221#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305222 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305223#endif
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07005224 .ndo_udp_tunnel_add = be_add_vxlan_port,
5225 .ndo_udp_tunnel_del = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005226 .ndo_features_check = be_features_check,
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305227 .ndo_get_phys_port_id = be_get_phys_port_id,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005228};
5229
/* One-time netdev initialization: advertise offload feature flags and
 * install the netdev/ethtool operation tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing only when the interface capabilities include RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the largest segment (plus MAC header) fits HW limits */
	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5256
Kalesh AP87ac1a52015-02-23 04:20:15 -05005257static void be_cleanup(struct be_adapter *adapter)
5258{
5259 struct net_device *netdev = adapter->netdev;
5260
5261 rtnl_lock();
5262 netif_device_detach(netdev);
5263 if (netif_running(netdev))
5264 be_close(netdev);
5265 rtnl_unlock();
5266
5267 be_clear(adapter);
5268}
5269
Kalesh AP484d76f2015-02-23 04:20:14 -05005270static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005271{
Kalesh APd0e1b312015-02-23 04:20:12 -05005272 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005273 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005274
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005275 status = be_setup(adapter);
5276 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005277 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005278
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005279 rtnl_lock();
5280 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005281 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005282 rtnl_unlock();
5283
5284 if (status)
5285 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005286
Kalesh APd0e1b312015-02-23 04:20:12 -05005287 netif_device_attach(netdev);
5288
Kalesh AP484d76f2015-02-23 04:20:14 -05005289 return 0;
5290}
5291
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305292static void be_soft_reset(struct be_adapter *adapter)
5293{
5294 u32 val;
5295
5296 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5297 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5298 val |= SLIPORT_SOFTRESET_SR_MASK;
5299 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5300}
5301
/* Decide whether a detected TPE error qualifies for in-driver recovery.
 * Rejects recovery when: the POST stage does not report a recoverable
 * error, too little time has passed since driver load, the previous
 * recovery was too recent, or the same error code repeats back-to-back.
 * On acceptance, records the recovery time and error code for the next
 * evaluation.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* NOTE(review): the raw jiffies subtractions below are not written
	 * with the time_after()/time_before() helpers; unsigned wraparound
	 * makes simple deltas mostly safe, but confirm against kernel
	 * jiffies-comparison guidance before altering them.
	 */
	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	/* Two identical TPE error codes in a row: treat as unrecoverable */
	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5347
/* One step of the BEx/Skyhawk TPE (recoverable-error) state machine.
 * Driven repeatedly from be_err_detection_task(): each call advances
 * err_rec->recovery_state and sets err_rec->resched_delay to tell the
 * caller when to run the next step.
 * Returns -EAGAIN while recovery is still in progress, 0 once the chip
 * is ready for re-initialization, or a negative error to abort
 * (with resched_delay forced to 0).
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		/* First entry: give the UE time to be fully reported */
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Other PFs skip the reset step and just wait for PF0 */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		/* PF0 only: verify recovery criteria, then soft-reset */
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		/* Wait period elapsed: hand back to the caller for reinit */
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5418
Kalesh AP484d76f2015-02-23 04:20:14 -05005419static int be_err_recover(struct be_adapter *adapter)
5420{
Kalesh AP484d76f2015-02-23 04:20:14 -05005421 int status;
5422
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305423 if (!lancer_chip(adapter)) {
5424 if (!adapter->error_recovery.recovery_supported ||
5425 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5426 return -EIO;
5427 status = be_tpe_recover(adapter);
5428 if (status)
5429 goto err;
5430 }
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305431
5432 /* Wait for adapter to reach quiescent state before
5433 * destroying queues
5434 */
5435 status = be_fw_wait_ready(adapter);
5436 if (status)
5437 goto err;
5438
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305439 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5440
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305441 be_cleanup(adapter);
5442
Kalesh AP484d76f2015-02-23 04:20:14 -05005443 status = be_resume(adapter);
5444 if (status)
5445 goto err;
5446
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305447 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5448
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005449err:
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005450 return status;
5451}
5452
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005453static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005454{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305455 struct be_error_recovery *err_rec =
5456 container_of(work, struct be_error_recovery,
5457 err_detection_work.work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005458 struct be_adapter *adapter =
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305459 container_of(err_rec, struct be_adapter,
5460 error_recovery);
5461 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305462 struct device *dev = &adapter->pdev->dev;
5463 int recovery_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005464
5465 be_detect_error(adapter);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305466 if (!be_check_error(adapter, BE_ERROR_HW))
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305467 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005468
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305469 recovery_status = be_err_recover(adapter);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305470 if (!recovery_status) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305471 err_rec->recovery_retries = 0;
5472 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305473 dev_info(dev, "Adapter recovery successful\n");
5474 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305475 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5476 /* BEx/SH recovery state machine */
5477 if (adapter->pf_num == 0 &&
5478 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5479 dev_err(&adapter->pdev->dev,
5480 "Adapter recovery in progress\n");
5481 resched_delay = err_rec->resched_delay;
5482 goto reschedule_task;
5483 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305484 /* For VFs, check if PF have allocated resources
5485 * every second.
5486 */
5487 dev_err(dev, "Re-trying adapter recovery\n");
5488 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305489 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5490 ERR_RECOVERY_MAX_RETRY_COUNT) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305491 /* In case of another error during recovery, it takes 30 sec
5492 * for adapter to come out of error. Retry error recovery after
5493 * this time interval.
5494 */
5495 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305496 resched_delay = ERR_RECOVERY_RETRY_DELAY;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305497 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305498 } else {
5499 dev_err(dev, "Adapter recovery failed\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305500 dev_err(dev, "Please reboot server to recover\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005501 }
5502
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305503 return;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305504
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305505reschedule_task:
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305506 be_schedule_err_detection(adapter, resched_delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005507}
5508
Vasundhara Volam21252372015-02-06 08:18:42 -05005509static void be_log_sfp_info(struct be_adapter *adapter)
5510{
5511 int status;
5512
5513 status = be_cmd_query_sfp_info(adapter);
5514 if (!status) {
5515 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305516 "Port %c: %s Vendor: %s part no: %s",
5517 adapter->port_name,
5518 be_misconfig_evt_port_state[adapter->phy_state],
5519 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005520 adapter->phy.vendor_pn);
5521 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305522 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005523}
5524
/* Periodic (1 sec) housekeeping task: samples die temperature, reaps MCC
 * completions while the interface is down, kicks off async stats queries,
 * replenishes starved RX queues, updates EQ delays and logs SFP events.
 * Always requeues itself on be_wq at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Die temperature is queried only on the PF and only once every
	 * be_get_temp_freq iterations (kept a power of 2 for MODULO).
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire an async stats request; stats_cmd_sent stays set until the
	 * previous request's completion arrives.
	 */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	/* Flag is set by the async event handler when an SFP event arrives */
	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5573
Sathya Perla78fad34e2015-02-23 04:20:08 -05005574static void be_unmap_pci_bars(struct be_adapter *adapter)
5575{
5576 if (adapter->csr)
5577 pci_iounmap(adapter->pdev, adapter->csr);
5578 if (adapter->db)
5579 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005580 if (adapter->pcicfg && adapter->pcicfg_mapped)
5581 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005582}
5583
/* BAR index holding the doorbell registers: BAR 0 on Lancer and on VFs,
 * BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5591
5592static int be_roce_map_pci_bars(struct be_adapter *adapter)
5593{
5594 if (skyhawk_chip(adapter)) {
5595 adapter->roce_db.size = 4096;
5596 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5597 db_bar(adapter));
5598 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5599 db_bar(adapter));
5600 }
5601 return 0;
5602}
5603
/* Map the PCI BARs needed by the driver: CSR (BEx PF only), doorbell,
 * and PCICFG (Skyhawk/BEx). Also decodes the SLI interface register to
 * learn the SLI family and whether this function is a VF.
 * Returns 0 on success or -ENOMEM; on failure all partial mappings are
 * released via be_unmap_pci_bars().
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (index 2) exists only on BEx physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	/* Doorbell BAR index depends on chip/function type (see db_bar()) */
	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			/* mapped flag tells be_unmap_pci_bars() to iounmap */
			adapter->pcicfg_mapped = true;
		} else {
			/* VFs reach PCICFG through an offset in the db BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5648
5649static void be_drv_cleanup(struct be_adapter *adapter)
5650{
5651 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5652 struct device *dev = &adapter->pdev->dev;
5653
5654 if (mem->va)
5655 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5656
5657 mem = &adapter->rx_filter;
5658 if (mem->va)
5659 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5660
5661 mem = &adapter->stats_cmd;
5662 if (mem->va)
5663 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5664}
5665
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats command buffer size varies with chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	/* Error-recovery state machine starts idle */
	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5740
/* PCI remove callback: tear everything down in the reverse order of
 * be_probe(). The sequence (RoCE first, then irq off, err-detection
 * cancel, netdev unregister, HW clear, FW clean, unmap, free) matters.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Skip the function reset while VFs are still assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5773
Arnd Bergmann9a032592015-05-18 23:06:45 +02005774static ssize_t be_hwmon_show_temp(struct device *dev,
5775 struct device_attribute *dev_attr,
5776 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305777{
5778 struct be_adapter *adapter = dev_get_drvdata(dev);
5779
5780 /* Unit: millidegree Celsius */
5781 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5782 return -EIO;
5783 else
5784 return sprintf(buf, "%u\n",
5785 adapter->hwmon_info.be_on_die_temp * 1000);
5786}
5787
/* hwmon: expose the on-die temperature as read-only temp1_input and
 * collect it into the attribute group registered in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5797
Sathya Perlad3791422012-09-28 04:39:44 +00005798static char *mc_name(struct be_adapter *adapter)
5799{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305800 char *str = ""; /* default */
5801
5802 switch (adapter->mc_type) {
5803 case UMC:
5804 str = "UMC";
5805 break;
5806 case FLEX10:
5807 str = "FLEX10";
5808 break;
5809 case vNIC1:
5810 str = "vNIC-1";
5811 break;
5812 case nPAR:
5813 str = "nPAR";
5814 break;
5815 case UFP:
5816 str = "UFP";
5817 break;
5818 case vNIC2:
5819 str = "vNIC-2";
5820 break;
5821 default:
5822 str = "";
5823 }
5824
5825 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005826}
5827
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5832
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005833static inline char *nic_name(struct pci_dev *pdev)
5834{
5835 switch (pdev->device) {
5836 case OC_DEVICE_ID1:
5837 return OC_NAME;
5838 case OC_DEVICE_ID2:
5839 return OC_NAME_BE;
5840 case OC_DEVICE_ID3:
5841 case OC_DEVICE_ID4:
5842 return OC_NAME_LANCER;
5843 case BE_DEVICE_ID2:
5844 return BE3_NAME;
5845 case OC_DEVICE_ID5:
5846 case OC_DEVICE_ID6:
5847 return OC_NAME_SH;
5848 default:
5849 return BE_NAME;
5850 }
5851}
5852
/* PCI probe callback: enable the device, map BARs, allocate driver
 * state, bring up the HW (be_setup), register the netdev, and start
 * the error-detection worker. Failures unwind through the goto ladder
 * in reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	/* probe_time lets the recovery logic reason about adapter uptime */
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5949
/* Legacy PM suspend callback: quiesce the adapter (irqs off, error
 * detection cancelled, HW cleaned up) then power the PCI device down.
 * Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5964
/* Legacy PM resume callback: re-enable the PCI device, restore its
 * config space, bring the adapter back up via be_resume() and restart
 * the error-detection worker. Returns 0 or a negative errno.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
5984
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe never completed */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6005
/* AER/EEH error_detected callback: quiesce the adapter on the first
 * EEH error (flag guards against repeated cleanup), then tell the PCI
 * core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6039
/* AER/EEH slot_reset callback: re-enable the device after a reset,
 * wait for FW readiness, and report whether recovery can proceed.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear all error flags so normal operation can resume */
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6065
/* AER/EEH resume callback: bring the adapter fully back up after a
 * successful slot reset and restart the error-detection worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6086
/* sysfs sriov_numvfs handler: enable @num_vfs virtual functions (or
 * disable all when 0). Returns the number of VFs enabled, 0 when
 * disabling, or a negative errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* Refuse to disable VFs that a guest is still using */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6141
/* PCI AER/EEH error-recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6147
/* PCI driver descriptor registered in be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6159
6160static int __init be_init_module(void)
6161{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306162 int status;
6163
Joe Perches8e95a202009-12-03 07:58:21 +00006164 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6165 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006166 printk(KERN_WARNING DRV_NAME
6167 " : Module param rx_frag_size must be 2048/4096/8192."
6168 " Using 2048\n");
6169 rx_frag_size = 2048;
6170 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006171
Vasundhara Volamace40af2015-03-04 00:44:34 -05006172 if (num_vfs > 0) {
6173 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6174 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6175 }
6176
Sathya Perlab7172412016-07-27 05:26:18 -04006177 be_wq = create_singlethread_workqueue("be_wq");
6178 if (!be_wq) {
6179 pr_warn(DRV_NAME "workqueue creation failed\n");
6180 return -1;
6181 }
6182
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306183 be_err_recovery_workq =
6184 create_singlethread_workqueue("be_err_recover");
6185 if (!be_err_recovery_workq)
6186 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6187
6188 status = pci_register_driver(&be_driver);
6189 if (status) {
6190 destroy_workqueue(be_wq);
6191 be_destroy_err_recovery_workq();
6192 }
6193 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006194}
6195module_init(be_init_module);
6196
/* Module exit: unregister the PCI driver first so no callbacks can
 * queue new work, then destroy the workqueues.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);